| code (string, lengths 86 to 54.5k) | code_codestyle (int64, 0 to 371) | style_context (string, lengths 87 to 49.2k) | style_context_codestyle (int64, 0 to 349) | label (int64, 0 to 1) |
|---|---|---|---|---|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = ["""image_processor""", """tokenizer"""]
_SCREAMING_SNAKE_CASE = """Pix2StructImageProcessor"""
_SCREAMING_SNAKE_CASE = ("""T5Tokenizer""", """T5TokenizerFast""")
def __init__( self , lowercase , lowercase ) -> str:
lowerCAmelCase = False
super().__init__(lowercase , lowercase )
def __call__( self , lowercase=None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 2_048 , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ) -> str:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
lowerCAmelCase = self.tokenizer
lowerCAmelCase = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
lowerCAmelCase = self.image_processor(
lowercase , return_tensors=lowercase , max_patches=lowercase , **lowercase )
else:
# add pixel_values and bbox
lowerCAmelCase = self.image_processor(
lowercase , return_tensors=lowercase , max_patches=lowercase , header_text=lowercase , **lowercase )
if text is not None and not self.image_processor.is_vqa:
lowerCAmelCase = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
if "attention_mask" in text_encoding:
lowerCAmelCase = text_encoding.pop("""attention_mask""" )
if "input_ids" in text_encoding:
lowerCAmelCase = text_encoding.pop("""input_ids""" )
else:
lowerCAmelCase = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def _snake_case ( self , *lowercase , **lowercase ) -> List[str]:
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _snake_case ( self , *lowercase , **lowercase ) -> Tuple:
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _snake_case ( self ) -> str:
lowerCAmelCase = self.tokenizer.model_input_names
lowerCAmelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
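A minimal usage sketch for the processor class above, using the upstream Hugging Face names rather than the obfuscated ones (the checkpoint id and image path are illustrative):

```python
# Hedged sketch: assumes the transformers Pix2StructProcessor API shown above.
from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.open("example.png")  # any RGB image

# For non-VQA checkpoints the text is tokenized separately and returned under
# decoder_* keys; for VQA checkpoints it is rendered into the image header instead.
inputs = processor(images=image, text="A caption", return_tensors="pt", max_patches=2048)
print(inputs.keys())  # e.g. flattened_patches, attention_mask, decoder_input_ids
```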
| 46 |
"""simple docstring"""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
SCREAMING_SNAKE_CASE__ = _symbol_database.Default()
SCREAMING_SNAKE_CASE__ = _descriptor_pool.Default().AddSerializedFile(
b"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18  \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
SCREAMING_SNAKE_CASE__ = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
SCREAMING_SNAKE_CASE__ = 45
SCREAMING_SNAKE_CASE__ = 1_581
SCREAMING_SNAKE_CASE__ = 1_517
SCREAMING_SNAKE_CASE__ = 1_570
SCREAMING_SNAKE_CASE__ = 1_584
SCREAMING_SNAKE_CASE__ = 1_793
SCREAMING_SNAKE_CASE__ = 1_795
SCREAMING_SNAKE_CASE__ = 1_916
SCREAMING_SNAKE_CASE__ = 1_864
SCREAMING_SNAKE_CASE__ = 1_905
SCREAMING_SNAKE_CASE__ = 1_919
SCREAMING_SNAKE_CASE__ = 2_429
SCREAMING_SNAKE_CASE__ = 2_208
SCREAMING_SNAKE_CASE__ = 2_418
SCREAMING_SNAKE_CASE__ = 2_323
SCREAMING_SNAKE_CASE__ = 2_407
# @@protoc_insertion_point(module_scope)
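The generated classes can be used to inspect a serialized SentencePiece model with the standard protobuf message API; a small sketch (the file name is illustrative):

```python
# Sketch: assumes this generated module is importable as sentencepiece_model_pb2.
import sentencepiece_model_pb2 as model_pb2

proto = model_pb2.ModelProto()
with open("spiece.model", "rb") as f:  # hypothetical tokenizer model file
    proto.ParseFromString(f.read())  # standard protobuf deserialization
print(proto.trainer_spec.model_type, len(proto.pieces))
```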
| 150 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] , __magic_name__ : int ) -> list[list[int]]:
"""simple docstring"""
UpperCamelCase :list[list[int]] = []
UpperCamelCase :list[int] = []
UpperCamelCase :str = 0
UpperCamelCase :Dict = sum(__magic_name__ )
create_state_space_tree(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
return result
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : list[int] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : list[int] , __magic_name__ : list[list[int]] , __magic_name__ : int , ) -> None:
"""simple docstring"""
if sum(__magic_name__ ) > max_sum or (remaining_nums_sum + sum(__magic_name__ )) < max_sum:
return
if sum(__magic_name__ ) == max_sum:
result.append(__magic_name__ )
return
for index in range(__magic_name__ , len(__magic_name__ ) ):
create_state_space_tree(
__magic_name__ , __magic_name__ , index + 1 , [*path, nums[index]] , __magic_name__ , remaining_nums_sum - nums[index] , )
UpperCAmelCase_ : Any = [3, 34, 4, 12, 5, 2]
UpperCAmelCase_ : Union[str, Any] = 9
UpperCAmelCase_ : int = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
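For the driver values above (nums = [3, 34, 4, 12, 5, 2], max_sum = 9) the search yields [3, 4, 2] and [4, 5]; note the driver calls generate_sum_of_subsets_soln, the pre-obfuscation name of the first function. A readable equivalent, kept as a sketch:

```python
def generate_sum_of_subsets(nums: list[int], max_sum: int) -> list[list[int]]:
    """Return every subset of nums whose elements sum to max_sum."""
    result: list[list[int]] = []

    def backtrack(index: int, path: list[int], remaining: int) -> None:
        # Prune when the path overshoots, or too little is left to reach the target.
        if sum(path) > max_sum or sum(path) + remaining < max_sum:
            return
        if sum(path) == max_sum:
            result.append(path)
            return
        for i in range(index, len(nums)):
            backtrack(i + 1, [*path, nums[i]], remaining - nums[i])

    backtrack(0, [], sum(nums))
    return result


assert generate_sum_of_subsets([3, 34, 4, 12, 5, 2], 9) == [[3, 4, 2], [4, 5]]
```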
| 62 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase_ : int = {
'''microsoft/swinv2-tiny-patch4-window8-256''': (
'''https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'''
),
}
class _SCREAMING_SNAKE_CASE ( _a ):
snake_case__ : List[Any] = """swinv2"""
snake_case__ : Tuple = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Tuple , __lowerCamelCase : List[str]=224 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : Union[str, Any]=3 , __lowerCamelCase : Tuple=96 , __lowerCamelCase : str=[2, 2, 6, 2] , __lowerCamelCase : Union[str, Any]=[3, 6, 12, 24] , __lowerCamelCase : int=7 , __lowerCamelCase : Dict=4.0 , __lowerCamelCase : Any=True , __lowerCamelCase : int=0.0 , __lowerCamelCase : str=0.0 , __lowerCamelCase : Dict=0.1 , __lowerCamelCase : Union[str, Any]="gelu" , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=0.02 , __lowerCamelCase : List[Any]=1E-5 , __lowerCamelCase : List[Any]=32 , **__lowerCamelCase : Optional[Any] , ):
super().__init__(**__lowerCamelCase )
UpperCamelCase :Optional[Any] = image_size
UpperCamelCase :str = patch_size
UpperCamelCase :Tuple = num_channels
UpperCamelCase :Optional[int] = embed_dim
UpperCamelCase :Optional[int] = depths
UpperCamelCase :int = len(__lowerCamelCase )
UpperCamelCase :List[Any] = num_heads
UpperCamelCase :Union[str, Any] = window_size
UpperCamelCase :Any = mlp_ratio
UpperCamelCase :Union[str, Any] = qkv_bias
UpperCamelCase :List[Any] = hidden_dropout_prob
UpperCamelCase :Any = attention_probs_dropout_prob
UpperCamelCase :List[Any] = drop_path_rate
UpperCamelCase :List[str] = hidden_act
UpperCamelCase :Optional[int] = use_absolute_embeddings
UpperCamelCase :Optional[int] = layer_norm_eps
UpperCamelCase :str = initializer_range
UpperCamelCase :List[str] = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
UpperCamelCase :List[str] = int(embed_dim * 2 ** (len(__lowerCamelCase ) - 1) )
UpperCamelCase :Dict = (0, 0, 0, 0)
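Grounded in the constructor above: with the defaults embed_dim=96 and depths=[2, 2, 6, 2], the derived hidden_size is 96 * 2**3 = 768. A quick check, assuming the upstream transformers class name:

```python
from transformers import Swinv2Config  # upstream name of the class above

config = Swinv2Config()  # defaults as in the signature above
assert config.hidden_size == config.embed_dim * 2 ** (len(config.depths) - 1) == 768
```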
| 62 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[str] , __snake_case : Tuple , __snake_case : Dict=13 , __snake_case : Optional[int]=7 , __snake_case : Optional[int]=True , __snake_case : Dict=True , __snake_case : Union[str, Any]=True , __snake_case : Dict=True , __snake_case : List[Any]=99 , __snake_case : List[Any]=32 , __snake_case : Union[str, Any]=5 , __snake_case : Optional[int]=4 , __snake_case : Optional[int]=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : int=0.1 , __snake_case : Optional[int]=0.1 , __snake_case : Optional[int]=512 , __snake_case : int=16 , __snake_case : List[str]=2 , __snake_case : Dict=0.02 , __snake_case : Tuple=4 , ) -> Union[str, Any]:
UpperCAmelCase : int = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : List[str] = seq_length
UpperCAmelCase : List[Any] = is_training
UpperCAmelCase : Tuple = use_attention_mask
UpperCAmelCase : Optional[int] = use_token_type_ids
UpperCAmelCase : Dict = use_labels
UpperCAmelCase : List[Any] = vocab_size
UpperCAmelCase : Tuple = hidden_size
UpperCAmelCase : Any = num_hidden_layers
UpperCAmelCase : Optional[int] = num_attention_heads
UpperCAmelCase : List[str] = intermediate_size
UpperCAmelCase : List[Any] = hidden_act
UpperCAmelCase : str = hidden_dropout_prob
UpperCAmelCase : str = attention_probs_dropout_prob
UpperCAmelCase : List[Any] = max_position_embeddings
UpperCAmelCase : Any = type_vocab_size
UpperCAmelCase : List[Any] = type_sequence_label_size
UpperCAmelCase : List[str] = initializer_range
UpperCAmelCase : int = num_choices
def A ( self : List[str] ) -> List[str]:
UpperCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase : Optional[int] = None
if self.use_attention_mask:
UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase : Optional[Any] = None
if self.use_token_type_ids:
UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCAmelCase : str = RobertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def A ( self : Optional[Any] ) -> Optional[int]:
UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : int = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def A ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : int = config_and_inputs
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE( A__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = True
lowerCamelCase__ = (
(
FlaxRobertaModel,
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
)
if is_flax_available()
else ()
)
def A ( self : Optional[Any] ) -> Union[str, Any]:
UpperCAmelCase : str = FlaxRobertaModelTester(self )
@slow
def A ( self : Optional[Any] ) -> int:
for model_class_name in self.all_model_classes:
UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained('''roberta-base''' , from_pt=__snake_case )
UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(__snake_case )
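A standalone sketch of what the slow test exercises, assuming the transformers Flax API (checkpoint id as in the test):

```python
import numpy as np
from transformers import FlaxRobertaModel

model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
outputs = model(np.ones((1, 1), dtype="i4"))  # a single dummy token
print(outputs.last_hidden_state.shape)  # (1, 1, hidden_size)
```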
| 23 |
'''simple docstring'''
from math import isclose, sqrt
def snake_case_ ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : float ) -> tuple[float, float, float]:
UpperCAmelCase : Optional[int] = point_y / 4 / point_x
UpperCAmelCase : str = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
UpperCAmelCase : Any = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
UpperCAmelCase : Union[str, Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
    # to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
UpperCAmelCase : Union[str, Any] = outgoing_gradient**2 + 4
UpperCAmelCase : Dict = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
UpperCAmelCase : List[Any] = (point_y - outgoing_gradient * point_x) ** 2 - 100
UpperCAmelCase : List[str] = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
UpperCAmelCase : Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
UpperCAmelCase : Optional[Any] = x_minus if isclose(_lowerCAmelCase , _lowerCAmelCase ) else x_plus
UpperCAmelCase : Union[str, Any] = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def snake_case_ ( _lowerCAmelCase : float = 1.4 , _lowerCAmelCase : float = -9.6 ) -> int:
UpperCAmelCase : int = 0
UpperCAmelCase : float = first_x_coord
UpperCAmelCase : float = first_y_coord
UpperCAmelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : str = next_point(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F"{solution() = }")
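The quadratic coefficients above follow from substituting the reflected line y = b + m(x - a), with m the outgoing gradient and (a, b) the current point, into the ellipse 4x^2 + y^2 = 100: this gives (m^2 + 4)x^2 + 2m(b - ma)x + (b - ma)^2 - 100 = 0, which is exactly quadratic_term, linear_term and constant_term; of the two roots, the one matching the input point is discarded via isclose.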
| 23 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
__snake_case = list[list[float | int]]
def a ( __a , __a ) -> Matrix:
'''simple docstring'''
UpperCamelCase__ :int = len(__a )
UpperCamelCase__ :Matrix = [[0 for _ in range(size + 1 )] for _ in range(__a )]
UpperCamelCase__ :int
UpperCamelCase__ :int
UpperCamelCase__ :int
UpperCamelCase__ :int
UpperCamelCase__ :int
UpperCamelCase__ :float
for row in range(__a ):
for col in range(__a ):
UpperCamelCase__ :List[str] = matrix[row][col]
UpperCamelCase__ :Tuple = vector[row][0]
UpperCamelCase__ :Tuple = 0
UpperCamelCase__ :int = 0
while row < size and col < size:
# pivoting
UpperCamelCase__ :int = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__a , __a ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __a ):
UpperCamelCase__ :Optional[int] = augmented[rowa][col] / augmented[row][col]
UpperCamelCase__ :Any = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __a ):
for row in range(__a ):
UpperCamelCase__ :Optional[int] = augmented[row][col] / augmented[col][col]
for cola in range(__a , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__a )
]
def a ( __a ) -> Callable[[int], int]:
'''simple docstring'''
UpperCamelCase__ :int = len(__a )
UpperCamelCase__ :Matrix = [[0 for _ in range(__a )] for _ in range(__a )]
UpperCamelCase__ :Matrix = [[0] for _ in range(__a )]
UpperCamelCase__ :Matrix
UpperCamelCase__ :int
UpperCamelCase__ :int
UpperCamelCase__ :int
for x_val, y_val in enumerate(__a ):
for col in range(__a ):
UpperCamelCase__ :Union[str, Any] = (x_val + 1) ** (size - col - 1)
UpperCamelCase__ :Optional[int] = y_val
UpperCamelCase__ :str = solve(__a , __a )
def interpolated_func(__a ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__a ) )
return interpolated_func
def a ( __a ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def a ( __a = question_function , __a = 10 ) -> int:
'''simple docstring'''
UpperCamelCase__ :list[int] = [func(__a ) for x_val in range(1 , order + 1 )]
UpperCamelCase__ :list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
UpperCamelCase__ :int = 0
UpperCamelCase__ :Callable[[int], int]
UpperCamelCase__ :int
for poly in polynomials:
UpperCamelCase__ :str = 1
while func(__a ) == poly(__a ):
x_val += 1
ret += poly(__a )
return ret
if __name__ == "__main__":
print(F"""{solution() = }""")
| 219 |
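A worked check for the Project Euler 101 routine above, grounded in the problem statement: for the cubic u(n) = n^3 with order 3, the first incorrect terms of the fitted polynomials of degree 0, 1 and 2 are 1, 15 and 58, so the sum is 74. Using the pre-obfuscation signature solution(func, order):

```python
# Hedged check; `solution` is the last function above under its original name.
assert solution(lambda n: n ** 3, 3) == 1 + 15 + 58 == 74
```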
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''distilbert-base-uncased''': '''https://huggingface.co/distilbert-base-uncased/resolve/main/config.json''',
'''distilbert-base-uncased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-cased''': '''https://huggingface.co/distilbert-base-cased/resolve/main/config.json''',
'''distilbert-base-cased-distilled-squad''': (
'''https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'''
),
'''distilbert-base-german-cased''': '''https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json''',
'''distilbert-base-multilingual-cased''': (
'''https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'''
),
'''distilbert-base-uncased-finetuned-sst-2-english''': (
'''https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'''
),
}
class lowercase ( A__ ):
"""simple docstring"""
_a = 'distilbert'
_a = {
'hidden_size': 'dim',
'num_attention_heads': 'n_heads',
'num_hidden_layers': 'n_layers',
}
def __init__( self , UpperCamelCase_=30522 , UpperCamelCase_=512 , UpperCamelCase_=False , UpperCamelCase_=6 , UpperCamelCase_=12 , UpperCamelCase_=768 , UpperCamelCase_=4 * 768 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=0.02 , UpperCamelCase_=0.1 , UpperCamelCase_=0.2 , UpperCamelCase_=0 , **UpperCamelCase_ , ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = vocab_size
UpperCamelCase__ :Dict = max_position_embeddings
UpperCamelCase__ :str = sinusoidal_pos_embds
UpperCamelCase__ :Any = n_layers
UpperCamelCase__ :str = n_heads
UpperCamelCase__ :Tuple = dim
UpperCamelCase__ :str = hidden_dim
UpperCamelCase__ :Dict = dropout
UpperCamelCase__ :int = attention_dropout
UpperCamelCase__ :Optional[Any] = activation
UpperCamelCase__ :Optional[int] = initializer_range
UpperCamelCase__ :Union[str, Any] = qa_dropout
UpperCamelCase__ :Dict = seq_classif_dropout
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ )
class lowercase ( A__ ):
"""simple docstring"""
@property
def lowerCAmelCase__ ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
UpperCamelCase__ :str = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
UpperCamelCase__ :str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 219 | 1 |
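The second class above is the ONNX export config, whose inputs property declares the dynamic axes. A sketch, assuming the upstream names DistilBertConfig and DistilBertOnnxConfig:

```python
from transformers import DistilBertConfig
from transformers.models.distilbert.configuration_distilbert import DistilBertOnnxConfig

onnx_config = DistilBertOnnxConfig(DistilBertConfig())  # default task
print(onnx_config.inputs)
# OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}),
#              ('attention_mask', {0: 'batch', 1: 'sequence'})])
```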
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class _UpperCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , __a = 16 , __a = 88 , __a = None , __a = 1 , __a = 0.0 , __a = 32 , __a = None , __a = False , __a = None , __a = None , __a = "geglu" , __a = None , ):
super().__init__()
__lowerCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__a , attention_head_dim=__a , in_channels=__a , num_layers=__a , dropout=__a , norm_num_groups=__a , cross_attention_dim=__a , attention_bias=__a , sample_size=__a , num_vector_embeds=__a , activation_fn=__a , num_embeds_ada_norm=__a , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
__lowerCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
__lowerCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
__lowerCAmelCase = [1, 0]
def snake_case ( self , __a , __a , __a=None , __a=None , __a=None , __a = True , ):
__lowerCAmelCase = hidden_states
__lowerCAmelCase = []
__lowerCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
__lowerCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
__lowerCAmelCase = self.transformer_index_for_condition[i]
__lowerCAmelCase = self.transformers[transformer_index](
__a , encoder_hidden_states=__a , timestep=__a , cross_attention_kwargs=__a , return_dict=__a , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
__lowerCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
__lowerCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__a )
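The combination step in the forward pass above reduces to simple residual mixing; a toy numpy sketch of just that arithmetic (not the diffusers module):

```python
import numpy as np

mix_ratio = 0.5
input_states = np.ones((1, 4))
transformer_outputs = [np.full((1, 4), 2.0), np.full((1, 4), 4.0)]  # toy stand-ins

# Each stored state is (transformer_output - input); they are blended and the
# input is added back, mirroring the last lines of the forward pass above.
deltas = [out - input_states for out in transformer_outputs]
output = deltas[0] * mix_ratio + deltas[1] * (1 - mix_ratio) + input_states
print(output)  # [[3. 3. 3. 3.]], the evenly weighted blend
```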
| 57 |
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , __a , __a=None , __a=True , __a=None , **__a ):
__lowerCAmelCase = parent
__lowerCAmelCase = config_class
__lowerCAmelCase = has_text_modality
__lowerCAmelCase = kwargs
__lowerCAmelCase = common_properties
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = (
["hidden_size", "num_attention_heads", "num_hidden_layers"]
if self.common_properties is None
else self.common_properties
)
# Add common fields for text models
if self.has_text_modality:
common_properties.extend(["vocab_size"] )
# Test that config has the common properties as getters
for prop in common_properties:
self.parent.assertTrue(hasattr(__a , __a ) , msg=f"`{prop}` does not exist" )
        # Test that config has the common properties as setters
for idx, name in enumerate(__a ):
try:
setattr(__a , __a , __a )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
# Test if config class can be called with Config(prop_name=..)
for idx, name in enumerate(__a ):
try:
__lowerCAmelCase = self.config_class(**{name: idx} )
self.parent.assertEqual(
getattr(__a , __a ) , __a , msg=f"`{name} value {idx} expected, but was {getattr(__a , __a )}" )
except NotImplementedError:
# Some models might not be able to implement setters for common_properties
# In that case, a NotImplementedError is raised
pass
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = json.loads(config.to_json_string() )
for key, value in self.inputs_dict.items():
self.parent.assertEqual(obj[key] , __a )
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(__a , "config.json" )
config_first.to_json_file(__a )
__lowerCAmelCase = self.config_class.from_json_file(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
config_first.save_pretrained(__a )
__lowerCAmelCase = self.config_class.from_pretrained(__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict )
__lowerCAmelCase = "test"
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCAmelCase = os.path.join(__a , __a )
config_first.save_pretrained(__a )
__lowerCAmelCase = self.config_class.from_pretrained(__a , subfolder=__a )
self.parent.assertEqual(config_second.to_dict() , config_first.to_dict() )
def snake_case ( self ):
__lowerCAmelCase = self.config_class(**self.inputs_dict , num_labels=5 )
self.parent.assertEqual(len(config.idalabel ) , 5 )
self.parent.assertEqual(len(config.labelaid ) , 5 )
__lowerCAmelCase = 3
self.parent.assertEqual(len(config.idalabel ) , 3 )
self.parent.assertEqual(len(config.labelaid ) , 3 )
def snake_case ( self ):
if self.config_class.is_composition:
return
__lowerCAmelCase = self.config_class()
self.parent.assertIsNotNone(__a )
def snake_case ( self ):
__lowerCAmelCase = copy.deepcopy(__a )
__lowerCAmelCase = self.config_class(**__a )
__lowerCAmelCase = []
for key, value in config_common_kwargs.items():
if key == "torch_dtype":
if not is_torch_available():
continue
else:
import torch
if config.torch_dtype != torch.floataa:
wrong_values.append(("torch_dtype", config.torch_dtype, torch.floataa) )
elif getattr(__a , __a ) != value:
wrong_values.append((key, getattr(__a , __a ), value) )
if len(__a ) > 0:
__lowerCAmelCase = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values] )
raise ValueError(f"The following keys were not properly set in the config:\n{errors}" )
def snake_case ( self ):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
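In the transformers test suite this helper (ConfigTester before obfuscation) is driven from a model's config test; a sketch of the usual wiring, with BertConfig as an illustrative model:

```python
# Hypothetical harness mirroring how transformers model tests use ConfigTester.
import unittest
from transformers import BertConfig

class BertConfigTest(unittest.TestCase):
    def setUp(self):
        self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)

    def test_config(self):
        # Runs the battery of checks defined by the class above.
        self.config_tester.run_common_tests()
```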
| 57 | 1 |
def _lowerCAmelCase ( A__: dict ):
'''simple docstring'''
UpperCAmelCase = set()
# To detect a back edge, keep track of vertices currently in the recursion stack
UpperCAmelCase = set()
return any(
node not in visited and depth_first_search(A__ , A__ , A__ , A__ )
for node in graph )
def _lowerCAmelCase ( A__: dict , A__: int , A__: set , A__: set ):
'''simple docstring'''
visited.add(A__ )
rec_stk.add(A__ )
for node in graph[vertex]:
if node not in visited:
if depth_first_search(A__ , A__ , A__ , A__ ):
return True
elif node in rec_stk:
return True
# The node needs to be removed from recursion stack before function ends
rec_stk.remove(A__ )
return False
if __name__ == "__main__":
from doctest import testmod
testmod()
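Note that the obfuscation gave both functions above the same name (_lowerCAmelCase) while the recursive call still targets depth_first_search, so the snippet as printed cannot run. A readable equivalent with a quick check:

```python
def check_cycle(graph: dict) -> bool:
    visited: set = set()
    rec_stk: set = set()  # vertices currently on the recursion stack
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True  # back edge: a cycle exists
    rec_stk.remove(vertex)  # done exploring this branch
    return False


assert check_cycle({0: [1], 1: [2], 2: [0]})      # 0 -> 1 -> 2 -> 0
assert not check_cycle({0: [1], 1: [2], 2: []})   # acyclic
```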
| 152 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 152 | 1 |
"""simple docstring"""
import argparse
_lowercase : Optional[Any] = 'docs/source/_static/js/custom.js'
def lowercase__ ( snake_case_ :int ):
with open(snake_case_ , encoding='''utf-8''' , newline='''\n''' ) as f:
__UpperCAmelCase = f.readlines()
__UpperCAmelCase = 0
# First let's put the right version
while not lines[index].startswith('''const stableVersion =''' ):
index += 1
__UpperCAmelCase = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith('''const versionMapping = {''' ):
index += 1
# We go until the end
while not lines[index].startswith('''}''' ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(snake_case_ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(snake_case_ )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('--version', help='Release version.')
_lowercase : str = parser.parse_args()
update_custom_js(args.version)
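The updater assumes custom.js contains a const stableVersion line followed by a versionMapping object literal; a hypothetical sketch of the shape it parses:

```python
# Hypothetical custom.js content the parser above expects (shown as a string):
SAMPLE_CUSTOM_JS = """\
const stableVersion = "v4.30.0"
const versionMapping = {
    "": "doc",
    "v4.30.0": "v4.30.0",
}
"""
# update_custom_js("4.31.0") rewrites the stableVersion line and inserts
# '    "v4.31.0": "v4.31.0",' just before the closing brace of versionMapping.
```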
| 332 |
"""simple docstring"""
import pprint
import requests
_lowercase : Optional[Any] = 'https://zenquotes.io/api'
def lowercase__ ( ):
return requests.get(API_ENDPOINT_URL + '''/today''' ).json()
def lowercase__ ( ):
return requests.get(API_ENDPOINT_URL + '''/random''' ).json()
if __name__ == "__main__":
_lowercase : int = random_quotes()
pprint.pprint(response)
| 332 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_snake_case = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-classification/requirements.txt")
_snake_case = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def lowerCAmelCase_ ( snake_case_ ):
with open(snake_case_,"""rb""" ) as f:
_A : Tuple = Image.open(snake_case_ )
return im.convert("""RGB""" )
@dataclass
class lowercase :
_a = field(
default=UpperCamelCase__,metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
},)
_a = field(
default=UpperCamelCase__,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
_a = field(default=UpperCamelCase__,metadata={"help": "A folder containing the training data."} )
_a = field(default=UpperCamelCase__,metadata={"help": "A folder containing the validation data."} )
_a = field(
default=0.15,metadata={"help": "Percent to split off of train for validation."} )
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},)
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},)
def a__ ( self ) -> List[Any]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class lowercase :
_a = field(
default="google/vit-base-patch16-224-in21k",metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},)
_a = field(
default=UpperCamelCase__,metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase__ )},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_a = field(
default=UpperCamelCase__,metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
_a = field(
default="main",metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},)
_a = field(default=UpperCamelCase__,metadata={"help": "Name or path of preprocessor config."} )
_a = field(
default=UpperCamelCase__,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
},)
_a = field(
default=UpperCamelCase__,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},)
def lowerCAmelCase_ ( snake_case_ ):
_A : str = torch.stack([example["""pixel_values"""] for example in examples] )
_A : str = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def lowerCAmelCase_ ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_A : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A : Dict = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""",snake_case_,snake_case_ )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",datefmt="""%m/%d/%Y %H:%M:%S""",handlers=[logging.StreamHandler(sys.stdout )],)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_A : List[Any] = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
_A : Dict = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_A : Optional[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
_A : Optional[int] = load_dataset(
data_args.dataset_name,data_args.dataset_config_name,cache_dir=model_args.cache_dir,task="""image-classification""",use_auth_token=True if model_args.use_auth_token else None,)
else:
_A : Tuple = {}
if data_args.train_dir is not None:
_A : Any = os.path.join(data_args.train_dir,"""**""" )
if data_args.validation_dir is not None:
_A : str = os.path.join(data_args.validation_dir,"""**""" )
_A : List[str] = load_dataset(
"""imagefolder""",data_files=snake_case_,cache_dir=model_args.cache_dir,task="""image-classification""",)
# If we don't have a validation split, split off a percentage of train as validation.
_A : Tuple = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split,snake_case_ ) and data_args.train_val_split > 0.0:
_A : Union[str, Any] = dataset["""train"""].train_test_split(data_args.train_val_split )
_A : List[Any] = split["""train"""]
_A : Optional[Any] = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
_A : Any = dataset["""train"""].features["""labels"""].names
_A : Dict = {}, {}
for i, label in enumerate(snake_case_ ):
_A : List[str] = str(snake_case_ )
_A : Dict = label
# Load the accuracy metric from the datasets package
_A : Tuple = evaluate.load("""accuracy""" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # predictions and label_ids fields) and has to return a dictionary mapping strings to floats.
def compute_metrics(snake_case_ ):
return metric.compute(predictions=np.argmax(p.predictions,axis=1 ),references=p.label_ids )
_A : Any = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path,num_labels=len(snake_case_ ),labelaid=snake_case_,idalabel=snake_case_,finetuning_task="""image-classification""",cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
_A : Union[str, Any] = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path,from_tf=bool(""".ckpt""" in model_args.model_name_or_path ),config=snake_case_,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,)
_A : Union[str, Any] = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
_A : Optional[int] = image_processor.size["""shortest_edge"""]
else:
_A : List[str] = (image_processor.size["""height"""], image_processor.size["""width"""])
_A : Optional[Any] = Normalize(mean=image_processor.image_mean,std=image_processor.image_std )
_A : int = Compose(
[
RandomResizedCrop(snake_case_ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
_A : Union[str, Any] = Compose(
[
Resize(snake_case_ ),
CenterCrop(snake_case_ ),
ToTensor(),
normalize,
] )
def train_transforms(snake_case_ ):
_A : List[Any] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(snake_case_ ):
_A : Tuple = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
_A : str = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
_A : Optional[int] = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(snake_case_ )
    # Initialize our trainer
_A : Optional[int] = Trainer(
model=snake_case_,args=snake_case_,train_dataset=dataset["""train"""] if training_args.do_train else None,eval_dataset=dataset["""validation"""] if training_args.do_eval else None,compute_metrics=snake_case_,tokenizer=snake_case_,data_collator=snake_case_,)
# Training
if training_args.do_train:
_A : List[str] = None
if training_args.resume_from_checkpoint is not None:
_A : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_A : Optional[Any] = last_checkpoint
_A : Any = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics("""train""",train_result.metrics )
trainer.save_metrics("""train""",train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
_A : Optional[int] = trainer.evaluate()
trainer.log_metrics("""eval""",snake_case_ )
trainer.save_metrics("""eval""",snake_case_ )
# Write model card and (optionally) push to hub
_A : Any = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
if __name__ == "__main__":
main()
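As a usage sketch (dataset and paths illustrative), the script is typically launched along the lines of `python run_image_classification.py --dataset_name beans --output_dir ./beans_outputs --do_train --do_eval`; the flags map onto the dataclass fields and TrainingArguments parsed above.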
| 355 |
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
_snake_case = logging.getLogger()
def lowerCAmelCase_ ( ):
_A : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""-f""" )
_A : Optional[Any] = parser.parse_args()
return args.f
class lowercase ( UpperCamelCase__ ):
def a__ ( self ) -> None:
_A : List[Any] = logging.StreamHandler(sys.stdout )
logger.addHandler(_a )
def a__ ( self , _a ) -> Dict:
_A : Tuple = get_gpu_count()
if n_gpu > 1:
pass
# XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
# script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
# distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
# cmd = [sys.executable] + distributed_args + args
# execute_subprocess_async(cmd, env=self.get_env())
# XXX: test the results - need to save them first into .json file
else:
args.insert(0 , """run_glue_deebert.py""" )
with patch.object(_a , """argv""" , _a ):
_A : Optional[Any] = run_glue_deebert.main()
for value in result.values():
self.assertGreaterEqual(_a , 0.666 )
@slow
@require_torch_non_multi_gpu
def a__ ( self ) -> Optional[int]:
_A : Tuple = """
--model_type roberta
--model_name_or_path roberta-base
--task_name MRPC
--do_train
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--max_seq_length 128
--per_gpu_eval_batch_size=1
--per_gpu_train_batch_size=8
--learning_rate 2e-4
--num_train_epochs 3
--overwrite_output_dir
--seed 42
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--save_steps 0
--overwrite_cache
--eval_after_first_stage
""".split()
self.run_and_check(_a )
_A : Optional[Any] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--eval_each_highway
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
_A : List[str] = """
--model_type roberta
--model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--task_name MRPC
--do_eval
--do_lower_case
--data_dir ./tests/fixtures/tests_samples/MRPC/
--output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage
--plot_data_dir ./examples/deebert/results/
--max_seq_length 128
--early_exit_entropy 0.1
--eval_highway
--overwrite_cache
--per_gpu_eval_batch_size=1
""".split()
self.run_and_check(_a )
| 343 | 0 |
"""simple docstring"""
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
lowerCAmelCase__ = pytest.mark.integration
lowerCAmelCase__ = {'''comet'''}
lowerCAmelCase__ = importlib.util.find_spec('''fairseq''') is not None
lowerCAmelCase__ = {'''code_eval'''}
lowerCAmelCase__ = os.name == '''nt'''
lowerCAmelCase__ = {'''bertscore''', '''frugalscore''', '''perplexity'''}
lowerCAmelCase__ = importlib.util.find_spec('''transformers''') is not None
def a__ ( SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : Any , SCREAMING_SNAKE_CASE : Optional[Any] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
@wraps(SCREAMING_SNAKE_CASE )
def wrapper(self : Tuple , SCREAMING_SNAKE_CASE : str ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , SCREAMING_SNAKE_CASE )
return wrapper
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
lowercase , lowercase , lowercase )
@local
class SCREAMING_SNAKE_CASE__ ( parameterized.TestCase ):
"""simple docstring"""
a : int ={}
a : List[str] =None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "[...]"
lowerCAmelCase : str = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , snake_case__ ) ).module_path )
lowerCAmelCase : List[Any] = datasets.load.import_main_class(metric_module.__name__ , dataset=snake_case__ )
# check parameters
lowerCAmelCase : Optional[Any] = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(snake_case__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase : Optional[Any] = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
lowerCAmelCase : List[Any] = "[...]"
lowerCAmelCase : Dict = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , snake_case__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase : List[Any] = doctest.testmod(snake_case__ , verbose=snake_case__ , raise_on_error=snake_case__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def lowercase__ ( self , snake_case__ , snake_case__ ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](snake_case__ ):
yield
else:
yield
@contextmanager
def lowercase__ ( self ):
"""simple docstring"""
def load_local_metric(snake_case__ , *snake_case__ , **snake_case__ ):
return load_metric(os.path.join("metrics" , snake_case__ ) , *snake_case__ , **snake_case__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase : Dict = load_local_metric
yield
@classmethod
def lowercase__ ( cls , snake_case__ ):
"""simple docstring"""
def wrapper(snake_case__ ):
lowerCAmelCase : Dict = contextmanager(snake_case__ )
lowerCAmelCase : List[str] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def lowercase__ ( self , snake_case__ ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase : Optional[Any] = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
import torch
def bert_cos_score_idf(SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Any , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(SCREAMING_SNAKE_CASE ) )
    # mock get_model which is supposed to download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase : Tuple = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def a__ ( SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
def load_from_checkpoint(SCREAMING_SNAKE_CASE : Optional[Any] ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def lowercase__ ( self , snake_case__ , *snake_case__ , **snake_case__ ):
"""simple docstring"""
assert len(snake_case__ ) == 2
lowerCAmelCase : Union[str, Any] = [0.19, 0.92]
return scores, sum(snake_case__ ) / len(snake_case__ )
return Model()
    # mock download_model and load_from_checkpoint, which are supposed to download a comet model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase : Tuple = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase : List[Any] = load_from_checkpoint
yield
def a__ ( ):
'''simple docstring'''
metric = load_metric(os.path.join("metrics" , "seqeval" ) )
wrong_scheme = "ERROR"
error_message = f"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(ValueError , match=re.escape(error_message ) ):
metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 108 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        projection_dim: int = 0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
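# A minimal usage sketch of the config class above, overriding one field for
# illustration; the printed values follow from the defaults shown in __init__.
if __name__ == "__main__":
    config = DPRConfig(projection_dim=128)
    print(config.model_type, config.hidden_size, config.projection_dim)  # dpr 768 128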
| 108 | 1 |
from __future__ import annotations
def mean(nums: list) -> float:
    '''simple docstring'''
    if not nums:
        raise ValueError('''List is empty''')
    return sum(nums) / len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
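    # A quick sanity check with made-up values; mean() is defined above.
    print(mean([3, 6, 9, 12, 15, 18, 21]))  # 12.0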
| 218 |
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = '''base_with_context'''
def load_notes_encoder ( weights , model ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[F'''layers_{lyr_num}''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
attention_weights = ly_weight['''attention''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_continuous_encoder ( weights , model ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
ly_weight = weights[F'''layers_{lyr_num}''']
attention_weights = ly_weight['''attention''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def load_decoder ( weights , model ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=False )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
ly_weight = weights[F'''layers_{lyr_num}''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
attention_weights = ly_weight['''self_attention''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
attention_weights = ly_weight['''MultiHeadDotProductAttention_0''']
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
SCREAMING_SNAKE_CASE__ = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def main(args ):
'''simple docstring'''
ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
ta_checkpoint = jax.tree_util.tree_map(onp.array , ta_checkpoint )
gin_overrides = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
gin_file = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
scheduler = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
decoder = T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
notes_encoder = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , notes_encoder )
continuous_encoder = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , continuous_encoder )
decoder = load_decoder(ta_checkpoint['''target''']['''decoder'''] , decoder )
melgan = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
pipe = SpectrogramDiffusionPipeline(
notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Dict = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=F"{MODEL}/checkpoint_500000",
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
_SCREAMING_SNAKE_CASE : Tuple = parser.parse_args()
main(args)
| 218 | 1 |
"""simple docstring"""
def cramers_rule_2x2(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """simple docstring"""
    if not (len(equation1) == len(equation2) == 3):
        raise ValueError("""Please enter a valid equation.""")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("""Both a & b of two equations can't be zero.""")

    # Extract the coefficients: each equation is [a, b, c] for a*x + b*y = c
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("""Infinite solutions. (Consistent system)""")
        else:
            raise ValueError("""No solution. (Inconsistent system)""")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
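# A minimal usage sketch of the solver above; the sample system is made up for
# illustration: solve 2x + 3y = 8 and x - y = 1, giving x = 2.2, y = 1.2.
if __name__ == "__main__":
    print(cramers_rule_2x2([2, 3, 8], [1, -1, 1]))  # (2.2, 1.2)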
| 64 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
class Conversation:
    '''simple docstring'''

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ) -> None:
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []
        self.uuid: uuid.UUID = conversation_id
        self.past_user_inputs: List[str] = past_user_inputs
        self.generated_responses: List[str] = generated_responses
        self.new_user_input: Optional[str] = text

    def __eq__(self, other) -> bool:
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False) -> None:
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten "
                    f"with: \"{text}\".")
                self.new_user_input = text
            else:
                logger.warning(
                    f"User input added while unprocessed input was existing: \"{self.new_user_input}\" new input "
                    f"ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input")
        else:
            self.new_user_input = text

    def mark_processed(self) -> None:
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str) -> None:
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self) -> str:
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
PIPELINE_INIT_ARGS , R'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class ConversationalPipeline(Pipeline):
    '''simple docstring'''

    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}
        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens
        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32) -> Dict[str, Any]:
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method")
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)
        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}

    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces)
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation) -> List[int]:
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))
        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
| 37 | 0 |
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError('''input must be a negative integer''')
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else '''0'''
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
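    # A quick sanity check with a made-up value: the 4-bit two's complement
    # of -5 is 16 - 5 = 11 = 0b1011.
    print(twos_complement(-5))  # 0b1011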
| 359 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar('''KEY''')
VAL = TypeVar('''VAL''')


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self) -> None:
        '''simple docstring'''
        super().__init__(None, None)

    def __bool__(self) -> bool:
        '''simple docstring'''
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        '''simple docstring'''
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        '''simple docstring'''
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        '''simple docstring'''
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        '''simple docstring'''
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        '''simple docstring'''
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        '''simple docstring'''
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        '''simple docstring'''
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        '''simple docstring'''
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        '''simple docstring'''
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        '''simple docstring'''
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        '''simple docstring'''
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        '''simple docstring'''
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        '''simple docstring'''
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        '''simple docstring'''
        val_string = ''' ,'''.join(
            F'''{item.key}: {item.val}''' for item in self._buckets if item)
        return F'''HashMap({val_string})'''
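# A minimal usage sketch of the open-addressing HashMap above, with made-up
# keys and values; resizing happens automatically as the map fills or empties.
if __name__ == "__main__":
    hash_map = HashMap()
    hash_map["apple"] = 1
    hash_map["banana"] = 2
    print(hash_map["apple"])  # 1
    del hash_map["banana"]
    print(len(hash_map), repr(hash_map))  # 1 HashMap(apple: 1)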
| 279 | 0 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase="train" ) -> List[str]:
return calculate_hypothesis_value(_UpperCAmelCase , _UpperCAmelCase ) - output(
_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> int:
lowerCamelCase__ : str = 0
for i in range(len(_UpperCAmelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> int:
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase=m ) -> Dict:
lowerCamelCase__ : Union[str, Any] = 0
for i in range(_UpperCAmelCase ):
if index == -1:
summation_value += _error(_UpperCAmelCase )
else:
summation_value += _error(_UpperCAmelCase ) * train_data[i][0][index]
return summation_value
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : List[Any] = summation_of_cost_derivative(_UpperCAmelCase , _UpperCAmelCase ) / m
return cost_derivative_value
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
global parameter_vector
# Tune these values to set a tolerance value for predicted output
lowerCamelCase__ : List[str] = 0.000_002
lowerCamelCase__ : Any = 0
lowerCamelCase__ : Tuple = 0
while True:
j += 1
lowerCamelCase__ : str = [0, 0, 0, 0]
for i in range(0 , len(_UpperCAmelCase ) ):
lowerCamelCase__ : Optional[Any] = get_cost_derivative(i - 1 )
lowerCamelCase__ : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
_UpperCAmelCase , _UpperCAmelCase , atol=_UpperCAmelCase , rtol=_UpperCAmelCase , ):
break
lowerCamelCase__ : str = temp_parameter_vector
print(('Number of iterations:', j) )
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
for i in range(len(_UpperCAmelCase ) ):
print(('Actual output value:', output(_UpperCAmelCase , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(_UpperCAmelCase , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print("""\nTesting gradient descent for a linear hypothesis function.\n""")
test_gradient_descent()
| 50 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_56, 2_56)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert('''RGB''')) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(F'''The value of strength should in [0.0, 1.0] but is {strength}''')

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                F'''`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}''')
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                F'''You have passed a list of generators of length {len(generator)}, but requested an effective batch'''
                F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''')
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print('''add noise to latents at timestep''', timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
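# A minimal usage sketch (hypothetical checkpoint; any unconditional DDPM/DDIM
# UNet checkpoint with a compatible scheduler should work). Commented out since
# it requires downloading model weights:
#
#     import PIL.Image
#     pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-ema-celebahq-256")
#     img = PIL.Image.open("face.png")
#     images, timestep = pipe(image=img, strength=0.5, return_dict=False)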
| 332 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two(x):
    return x + 2
class PythonInterpreterTester(unittest.TestCase):
    """simple docstring"""

    def test_evaluate_assign(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

        code = "x = y"
        state = {"y": 5}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 5, "y": 5})

    def test_evaluate_call(self):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code, {}, state=state)
        assert result is None
        assert "tried to execute add_two" in out.out

    def test_evaluate_constant(self):
        code = "x = 3"
        state = {}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3})

    def test_evaluate_dict(self):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertDictEqual(result, {"x": 3, "y": 5})
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_expression(self):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "y": 5})

    def test_evaluate_f_string(self):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state, {"x": 3, "text": "This is x: 3."})

    def test_evaluate_if(self):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state, {"x": 3, "y": 2})

        state = {"x": 8}
        result = evaluate(code, {}, state=state)
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state, {"x": 8, "y": 5})

    def test_evaluate_list(self):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        self.assertListEqual(result, [3, 5])
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

    def test_evaluate_name(self):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code, {}, state=state)
        assert result == 3
        self.assertDictEqual(state, {"x": 3, "y": 3})

    def test_evaluate_subscript(self):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_list": [3, 5]})

        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code, {"add_two": add_two}, state=state)
        assert result == 5
        self.assertDictEqual(state, {"x": 3, "test_dict": {"x": 3, "y": 5}})

    def test_evaluate_for(self):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code, {"range": range}, state=state)
        assert result == 2
        self.assertDictEqual(state, {"x": 2, "i": 2})
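# A quick standalone sanity check of the interpreter outside unittest;
# add_two is the toy tool defined at the top of this file.
if __name__ == "__main__":
    state = {"x": 1}
    print(evaluate("y = add_two(x)", {"add_two": add_two}, state=state))  # 3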
| 363 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    conv_layer = "std_conv" if "bit" in model_name else False
    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )
    return config


def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_config(model_name)
    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()
    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val
    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)
    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms
    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )
    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values
    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)
    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits
    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])
    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="resnetv2_50x1_bitm",
        type=str,
        help="Name of the BiT timm model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model to the hub.",
    )
    args = parser.parse_args()
    convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
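# Example invocation (hypothetical script name and paths, shown for illustration only):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit_dump --push_to_hub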
| 257 | 0 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
__snake_case : List[str] = """\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
"""
__snake_case : Union[str, Any] = """\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper \"Evaluating Large Language Models Trained on Code\"
(https://arxiv.org/abs/2107.03374).
"""
__snake_case : Optional[int] = """
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric(\"code_eval\")
>>> test_cases = [\"assert add(2,3)==5\"]
>>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
"""
__snake_case : List[Any] = """
################################################################################
!!!WARNING!!!
################################################################################
The \"code_eval\" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper \"Evaluating Large
Language Models Trained on Code\" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this
with:
>>> import os
>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"
################################################################################\
"""
__snake_case : Any = """The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the \"Software\"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE."""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    """simple docstring"""

    def _info(self):
        return datasets.MetricInfo(
            # This is the description that will appear on the metrics page.
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/openai/human-eval",
            codebase_urls=["https://github.com/openai/human-eval"],
            reference_urls=["https://github.com/openai/human-eval"],
            license=_LICENSE,
        )

    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)
        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")
        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)
            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)
        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    '''simple docstring'''

    def estimator(n: int, c: int, k: int) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
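# A quick worked example of the unbiased pass@k estimator above, with made-up
# counts: for n=5 samples and c=2 correct at k=2,
#   pass@2 = 1 - (1 - 2/4) * (1 - 2/5) = 1 - 0.5 * 0.6 = 0.7
if __name__ == "__main__":
    print(estimate_pass_at_k([5], [2], 2))  # [0.7]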
| 248 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 298 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
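# A minimal usage sketch: instantiating the config above with its defaults;
# the printed values follow directly from the __init__ signature.
if __name__ == "__main__":
    config = RoCBertConfig()
    print(config.model_type, config.hidden_size, config.pronunciation_vocab_size)  # roc_bert 768 910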
| 362 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        """simple docstring"""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        """simple docstring"""
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        """simple docstring"""
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        """simple docstring"""
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        """simple docstring"""
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        """simple docstring"""
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 199 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
MODEL_MODES = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowercase ( pl.LightningModule ):
"""simple docstring"""
def __init__( self : Union[str, Any] , lowerCAmelCase__ : argparse.Namespace , lowerCAmelCase__ : int=None , lowerCAmelCase__ : Tuple="base" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : int=None , **lowerCAmelCase__ : Union[str, Any] , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = 0
SCREAMING_SNAKE_CASE_: List[Any] = Path(self.hparams.output_dir)
SCREAMING_SNAKE_CASE_: str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
SCREAMING_SNAKE_CASE_: int = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=lowerCAmelCase__ , **lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: PretrainedConfig = config
SCREAMING_SNAKE_CASE_: Optional[int] = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , lowerCAmelCase__ , lowerCAmelCase__):
assert hasattr(self.config , lowerCAmelCase__), F"model config doesn't have a `{p}` attribute"
setattr(self.config , lowerCAmelCase__ , getattr(self.hparams , lowerCAmelCase__))
if tokenizer is None:
SCREAMING_SNAKE_CASE_: str = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: PreTrainedTokenizer = tokenizer
SCREAMING_SNAKE_CASE_: str = MODEL_MODES[mode]
if model is None:
SCREAMING_SNAKE_CASE_: Any = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path) , config=self.config , cache_dir=lowerCAmelCase__ , )
else:
SCREAMING_SNAKE_CASE_: List[str] = model
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , *lowerCAmelCase__ : List[Any] , **lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: List[str] = self.model_type.from_pretrained(*lowerCAmelCase__ , **lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
SCREAMING_SNAKE_CASE_: str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
SCREAMING_SNAKE_CASE_: str = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = self.model
SCREAMING_SNAKE_CASE_: List[str] = ["bias", "LayerNorm.weight"]
SCREAMING_SNAKE_CASE_: List[str] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ], # check these named parameters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
SCREAMING_SNAKE_CASE_: Tuple = Adafactor(
lowerCAmelCase__ , lr=self.hparams.learning_rate , scale_parameter=lowerCAmelCase__ , relative_step=lowerCAmelCase__)
else:
SCREAMING_SNAKE_CASE_: Optional[int] = AdamW(
lowerCAmelCase__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
SCREAMING_SNAKE_CASE_: List[Any] = optimizer
SCREAMING_SNAKE_CASE_: Tuple = self.get_lr_scheduler()
return [optimizer], [scheduler]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : str):
return self.validation_step(lowerCAmelCase__ , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : List[str]):
return self.validation_end(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Any = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
SCREAMING_SNAKE_CASE_: List[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : Union[str, Any]):
if stage == "test":
SCREAMING_SNAKE_CASE_: Optional[int] = len(self.test_dataloader().dataset)
else:
SCREAMING_SNAKE_CASE_: Any = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = len(self.train_dataloader().dataset)
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : int , lowerCAmelCase__ : bool = False):
raise NotImplementedError("You must implement this for your task")
def _SCREAMING_SNAKE_CASE ( self : str):
return self.train_loader
def _SCREAMING_SNAKE_CASE ( self : str):
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowerCAmelCase__ : Any):
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
lowerCAmelCase__ , list(filter(lowerCAmelCase__ , self.hparams.model_name_or_path.split("/"))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowerCAmelCase__ : Dict[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = self.output_dir.joinpath("best_tfmr")
SCREAMING_SNAKE_CASE_: Tuple = self.step_count
self.model.save_pretrained(lowerCAmelCase__)
self.tokenizer.save_pretrained(lowerCAmelCase__)
@staticmethod
def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[Any]):
parser.add_argument(
"--model_name_or_path" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=lowerCAmelCase__ , help="Pretrained config name or path if not the same as model_name")
parser.add_argument(
"--tokenizer_name" , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(lowerCAmelCase__).parent / "test_run" / "cache") , type=lowerCAmelCase__ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=lowerCAmelCase__ , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=lowerCAmelCase__ , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=lowerCAmelCase__ , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=lowerCAmelCase__ , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5E-5 , type=lowerCAmelCase__ , help="The initial learning rate for Adam.")
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=lowerCAmelCase__ , metavar=lowerCAmelCase__ , type=lowerCAmelCase__ , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=lowerCAmelCase__ , help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon" , default=1E-8 , type=lowerCAmelCase__ , help="Epsilon for Adam optimizer.")
parser.add_argument("--warmup_steps" , default=0 , type=lowerCAmelCase__ , help="Linear warmup over warmup_steps.")
parser.add_argument("--num_workers" , default=4 , type=lowerCAmelCase__ , help="kwarg passed to DataLoader")
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=lowerCAmelCase__)
parser.add_argument("--train_batch_size" , default=32 , type=lowerCAmelCase__)
parser.add_argument("--eval_batch_size" , default=32 , type=lowerCAmelCase__)
parser.add_argument("--adafactor" , action="store_true")
class __lowercase ( pl.Callback ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str):
if (
trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with Ray. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowercase ( pl.Callback ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(lowerCAmelCase__)
class __lowercase ( pl.Callback ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: List[str] = trainer.lr_schedulers[0]["scheduler"]
SCREAMING_SNAKE_CASE_: str = {F"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : pl.Trainer , lowerCAmelCase__ : pl.LightningModule):
rank_zero_info("***** Validation results *****")
SCREAMING_SNAKE_CASE_: Tuple = trainer.callback_metrics
# Log results
for key in sorted(lowerCAmelCase__):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(lowerCAmelCase__ , str(metrics[key])))
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase__ : pl.Trainer , lowerCAmelCase__ : pl.LightningModule):
rank_zero_info("***** Test results *****")
SCREAMING_SNAKE_CASE_: Dict = trainer.callback_metrics
# Log and save results to file
SCREAMING_SNAKE_CASE_: Union[str, Any] = os.path.join(pl_module.hparams.output_dir , "test_results.txt")
with open(lowerCAmelCase__ , "w") as writer:
for key in sorted(lowerCAmelCase__):
if key not in ["log", "progress_bar"]:
rank_zero_info("{} = {}\n".format(lowerCAmelCase__ , str(metrics[key])))
writer.write("{} = {}\n".format(lowerCAmelCase__ , str(metrics[key])))
def A_ ( _UpperCAmelCase , _UpperCAmelCase ):
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
"--output_dir" , default=str(Path(_UpperCAmelCase ).parent / "test_run" / "model_checkpoints" ) , type=_UpperCAmelCase , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument(
"--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
parser.add_argument(
"--fp16_opt_level" , type=_UpperCAmelCase , default="O2" , help=(
"For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html"
) , )
parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=_UpperCAmelCase )
parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=_UpperCAmelCase , help="Max gradient norm" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
parser.add_argument(
"--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=_UpperCAmelCase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--seed" , type=_UpperCAmelCase , default=42 , help="random seed for initialization" )
parser.add_argument(
"--data_dir" , default=str(Path(_UpperCAmelCase ).parent / "test_run" / "dummy-train-data" ) , type=_UpperCAmelCase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )
def A_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=True , _UpperCAmelCase=[] , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
pl.seed_everything(args.seed )
# init model
SCREAMING_SNAKE_CASE_: List[str] = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_UpperCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
SCREAMING_SNAKE_CASE_: Optional[int] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_UpperCAmelCase )
if logging_callback is None:
SCREAMING_SNAKE_CASE_: List[str] = LoggingCallback()
SCREAMING_SNAKE_CASE_: int = {}
if args.fpaa:
SCREAMING_SNAKE_CASE_: List[Any] = 16
if args.gpus > 1:
SCREAMING_SNAKE_CASE_: Union[str, Any] = "auto"
SCREAMING_SNAKE_CASE_: int = "ddp"
SCREAMING_SNAKE_CASE_: Optional[Any] = args.accumulate_grad_batches
SCREAMING_SNAKE_CASE_: Any = None
SCREAMING_SNAKE_CASE_: int = "auto"
SCREAMING_SNAKE_CASE_: List[str] = pl.Trainer.from_argparse_args(
_UpperCAmelCase , weights_summary=_UpperCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_UpperCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **_UpperCAmelCase , )
if args.do_train:
trainer.fit(_UpperCAmelCase )
else:
print("RAG modeling tests with new set functions successfuly executed!" )
return trainer
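# A minimal sketch of the optimizer grouping used in configure_optimizers above:
# biases and LayerNorm weights are excluded from weight decay, everything else
# gets the configured decay. The helper name and the default decay value are
# illustrative assumptions, not part of the original module.
def grouped_parameters_sketch(model, weight_decay=0.01):
    no_decay = ("bias", "LayerNorm.weight")
    return [
        {
            "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
            "weight_decay": weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]
# e.g. optimizer = AdamW(grouped_parameters_sketch(model), lr=5e-5, eps=1e-8)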
| 13 |
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : List[Any] = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase : str = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = TextaTextGenerationPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__)
return generator, ["Something to write", "Something else"]
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there")
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ANY(lowerCAmelCase__)}])
        # These are encoder-decoder models; they don't just append to the incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))
SCREAMING_SNAKE_CASE_: List[Any] = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
SCREAMING_SNAKE_CASE_: Dict = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
[{"generated_text": ANY(lowerCAmelCase__)}, {"generated_text": ANY(lowerCAmelCase__)}],
] , )
with self.assertRaises(lowerCAmelCase__):
generator(4)
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: Union[str, Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
SCREAMING_SNAKE_CASE_: Union[str, Any] = 3
SCREAMING_SNAKE_CASE_: Any = generator(
"Something there" , num_return_sequences=lowerCAmelCase__ , num_beams=lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE_: Any = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = generator("This is a test" , do_sample=lowerCAmelCase__ , num_return_sequences=2 , return_tensors=lowerCAmelCase__)
self.assertEqual(
lowerCAmelCase__ , [
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
] , )
SCREAMING_SNAKE_CASE_: str = generator.model.config.eos_token_id
SCREAMING_SNAKE_CASE_: Union[str, Any] = "<pad>"
SCREAMING_SNAKE_CASE_: Tuple = generator(
["This is a test", "This is a second test"] , do_sample=lowerCAmelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=lowerCAmelCase__ , )
self.assertEqual(
lowerCAmelCase__ , [
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
[
{"generated_token_ids": ANY(torch.Tensor)},
{"generated_token_ids": ANY(torch.Tensor)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf")
# do_sample=False necessary for reproducibility
SCREAMING_SNAKE_CASE_: List[Any] = generator("Something there" , do_sample=lowerCAmelCase__)
self.assertEqual(lowerCAmelCase__ , [{"generated_text": ""}])
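# Standalone usage sketch for the pipeline exercised above. The tiny random
# checkpoint is the same one the tests use; its outputs are not meaningful, so
# only the structure of the result is checked (the helper name is illustrative).
def _text2text_usage_sketch():
    text2text = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
    result = text2text("Something there", do_sample=False)
    assert isinstance(result, list) and "generated_text" in result[0]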
| 13 | 1 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class a__( lowerCamelCase__ ):
def lowercase_ ( self : List[str] ):
a : Optional[Any] = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__snake_case ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase_ ( self : Tuple ):
import faiss
a : Dataset = self._create_dummy_dataset()
a : Any = dset.map(
lambda __snake_case , __snake_case : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__snake_case , keep_in_memory=__snake_case )
a : Optional[int] = dset.add_faiss_index('vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT )
a : Optional[Any] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def lowercase_ ( self : List[str] ):
import faiss
a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=1_00 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase_ ( self : str ):
import faiss
a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
a : List[str] = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def lowercase_ ( self : Optional[int] ):
a : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__snake_case , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def lowercase_ ( self : Union[str, Any] ):
from elasticsearch import Elasticsearch
a : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a : str = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
a : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
a : str = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__snake_case )
a : Union[str, Any] = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class a__( lowerCamelCase__ ):
def lowercase_ ( self : Tuple ):
import faiss
a : List[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
a : Dict = np.zeros(5 , dtype=np.floataa )
a : int = 1
a : Tuple = index.search(__snake_case )
self.assertRaises(__snake_case , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
a : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
a : Dict = index.search_batch(__snake_case )
self.assertRaises(__snake_case , index.search_batch , queries[0] )
a : List[str] = [scores[0] for scores in total_scores]
a : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __snake_case )
def lowercase_ ( self : Optional[Any] ):
import faiss
a : List[str] = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a : Any = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__snake_case ):
a : List[str] = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self : Optional[Any] ):
import faiss
a : List[Any] = faiss.IndexFlat(5 )
a : Tuple = FaissIndex(custom_index=__snake_case )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def lowercase_ ( self : Union[str, Any] ):
import faiss
a : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__snake_case ) as tmp_file:
index.save(tmp_file.name )
a : Any = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a : int = np.zeros(5 , dtype=np.floataa )
a : Union[str, Any] = 1
a : List[Any] = index.search(__snake_case )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase__ ( _A ):
import faiss
a : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
a : List[Any] = 'index.faiss'
a : Optional[Any] = f"""mock://{index_name}"""
index.save(_A , storage_options=mockfs.storage_options )
a : str = FaissIndex.load(_A , storage_options=mockfs.storage_options )
a : Tuple = np.zeros(5 , dtype=np.floataa )
a : List[Any] = 1
a : str = index.search(_A )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class a__( lowerCamelCase__ ):
def lowercase_ ( self : List[Any] ):
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
a : Union[str, Any] = Elasticsearch()
a : Optional[Any] = {'acknowledged': True}
a : List[str] = ElasticSearchIndex(es_client=__snake_case )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
a : Dict = 'foo'
a : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a : List[Any] = index.search(__snake_case )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a : Optional[Any] = 'foo'
a : Union[str, Any] = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
a : Tuple = index.search(__snake_case , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a : Tuple = ['foo', 'bar', 'foobar']
a : List[str] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a : Any = index.search_batch(__snake_case )
a : Union[str, Any] = [scores[0] for scores in total_scores]
a : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , __snake_case )
# batched queries with timeout
a : int = ['foo', 'bar', 'foobar']
a : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
a : int = index.search_batch(__snake_case , request_timeout=30 )
a : List[Any] = [scores[0] for scores in total_scores]
a : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__snake_case ) , 0 )
self.assertListEqual([1, 1, 1] , __snake_case ) | 354 |
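# Minimal end-to-end sketch of the FaissIndex wrapper covered above: index the
# rows of an identity matrix, then query with a one-hot vector and expect its
# own row id back (the inner product is maximal for the matching unit vector).
# The helper name is illustrative; the API calls are the ones used in the tests.
def _faiss_index_sketch():
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))
    query = np.zeros(5, dtype=np.float32)
    query[1] = 1.0
    scores, indices = index.search(query)
    assert indices[0] == 1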
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 96 | 0 |
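# The lazy-import pattern above defers heavy submodule imports until an exported
# name is first accessed. A generic, framework-free sketch of the same idea,
# using only the standard library (the class name and internals are
# illustrative, not the transformers implementation):
import importlib
import types
class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # {submodule: [exported names]} inverted to a per-name lookup table
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}
    def __getattr__(self, attr):
        if attr in self._name_to_module:
            module = importlib.import_module("." + self._name_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")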
'''simple docstring'''
def solution(limit: int = 2_8_1_2_3) -> int:
    """Project Euler 23: sum of all positive integers that cannot be written as
    the sum of two abundant numbers (every integer above 28123 can be)."""
    # sieve of proper-divisor sums; 1 divides every n >= 2
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a) in abundants for a in abundants):
            res += n
    return res
if __name__ == "__main__":
print(solution())
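# Sanity check for the divisor-sum sieve above: the proper divisors of 12 are
# 1, 2, 3, 4 and 6, summing to 16 > 12, so 12 is the smallest abundant number.
# The brute-force helper is an illustrative cross-check, not part of the solution.
def proper_divisor_sum(n: int) -> int:
    return sum(d for d in range(1, n) if n % d == 0)
assert proper_divisor_sum(12) == 16
assert all(proper_divisor_sum(k) <= k for k in range(1, 12))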
| 34 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class snake_case_ (unittest.TestCase ):
def lowerCamelCase__( self :Dict ) -> Union[str, Any]:
if self.framework == "pytorch":
subprocess.run(
F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() ,encoding='utf-8' ,check=__snake_case ,)
assert hasattr(self ,'env' )
def lowerCamelCase__( self :List[Any] ,__snake_case :Optional[Any]=1 ) -> int:
# creates estimator
return HuggingFace(
entry_point=self.script ,source_dir=self.env.test_path ,role=self.env.role ,image_uri=self.env.image_uri ,base_job_name=F'{self.env.base_job_name}-single' ,instance_count=__snake_case ,instance_type=self.instance_type ,debugger_hook_config=__snake_case ,hyperparameters={**self.env.hyperparameters, 'model_name_or_path': self.model_name_or_path} ,metric_definitions=self.env.metric_definitions ,py_version='py36' ,)
def lowerCamelCase__( self :Dict ,__snake_case :str ) -> Optional[int]:
TrainingJobAnalytics(__snake_case ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' )
def lowerCamelCase__( self :Tuple ) -> Union[str, Any]:
# create estimator
a__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
a__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_accuracy']['value'] )
a__ = list(result_metrics_df[result_metrics_df.metric_name == 'eval_loss']['value'] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
a__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get('TrainingTimeInSeconds' ,99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'{estimator.latest_training_job.name}.json' ,'w' ) as outfile:
json.dump({'train_time': train_runtime, 'eval_accuracy': eval_accuracy, 'eval_loss': eval_loss} ,__snake_case )
| 240 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = "microsoft/speecht5_tts"
lowerCAmelCase__ = (
"This is a tool that reads an English text out loud. It takes an input named `text` which should contain the "
"text to read (in English) and returns a waveform object containing the sound."
)
lowerCAmelCase__ = "text_reader"
lowerCAmelCase__ = SpeechTaProcessor
lowerCAmelCase__ = SpeechTaForTextToSpeech
lowerCAmelCase__ = SpeechTaHifiGan
lowerCAmelCase__ = ["text"]
lowerCAmelCase__ = ["audio"]
def A__ ( self ) -> List[Any]:
'''simple docstring'''
if self.post_processor is None:
lowercase_ = "microsoft/speecht5_hifigan"
super().setup()
def A__ ( self , UpperCAmelCase , UpperCAmelCase=None ) -> Any:
'''simple docstring'''
lowercase_ = self.pre_processor(text=UpperCAmelCase , return_tensors="pt" , truncation=UpperCAmelCase )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("Datasets needs to be installed if not passing speaker embeddings." )
lowercase_ = load_dataset("Matthijs/cmu-arctic-xvectors" , split="validation" )
lowercase_ = torch.tensor(embeddings_dataset[7305]["xvector"] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def A__ ( self , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(UpperCAmelCase ).cpu().detach()
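# Hedged usage sketch for the components wired together above, written against
# the public transformers API (the class names below are the real ones, which
# differ from the aliases imported at the top of this module; the
# speaker-embedding index matches the one hard-coded above):
def _tts_usage_sketch():
    import torch
    from datasets import load_dataset
    from transformers import SpeechT5ForTextToSpeech, SpeechT5HifiGan, SpeechT5Processor
    processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
    model = SpeechT5ForTextToSpeech.from_pretrained("microsoft/speecht5_tts")
    vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
    inputs = processor(text="Hello, world.", return_tensors="pt")
    embeddings = load_dataset("Matthijs/cmu-arctic-xvectors", split="validation")
    speaker = torch.tensor(embeddings[7305]["xvector"]).unsqueeze(0)
    return model.generate_speech(inputs["input_ids"], speaker, vocoder=vocoder)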
| 297 |
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class __lowerCamelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = 1
@register_to_config
def __init__( self , UpperCAmelCase = 1000 , UpperCAmelCase = None ) -> List[Any]:
'''simple docstring'''
self.set_timesteps(UpperCAmelCase )
# standard deviation of the initial noise distribution
lowercase_ = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
lowercase_ = 4
# running values
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Optional[int]:
'''simple docstring'''
lowercase_ = num_inference_steps
lowercase_ = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
lowercase_ = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
lowercase_ = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
lowercase_ = torch.sin(steps * math.pi / 2 ) ** 2
lowercase_ = (1.0 - self.betas**2) ** 0.5
lowercase_ = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
lowercase_ = timesteps.to(UpperCAmelCase )
lowercase_ = []
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True , ) -> Union[SchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
lowercase_ = (self.timesteps == timestep).nonzero().item()
lowercase_ = timestep_index + 1
lowercase_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCAmelCase )
if len(self.ets ) == 1:
lowercase_ = self.ets[-1]
elif len(self.ets ) == 2:
lowercase_ = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
lowercase_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
lowercase_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
lowercase_ = self._get_prev_sample(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase )
def A__ ( self , UpperCAmelCase , *UpperCAmelCase , **UpperCAmelCase ) -> torch.FloatTensor:
'''simple docstring'''
return sample
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = self.alphas[timestep_index]
lowercase_ = self.betas[timestep_index]
lowercase_ = self.alphas[prev_timestep_index]
lowercase_ = self.betas[prev_timestep_index]
lowercase_ = (sample - sigma * ets) / max(UpperCAmelCase , 1e-8 )
lowercase_ = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ) -> List[str]:
'''simple docstring'''
return self.config.num_train_timesteps
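# step() above is a linear multistep update: once four model outputs have been
# buffered in self.ets, they are combined with fixed fourth-order coefficients.
# A plain sketch of that combination (history ordered oldest to newest),
# mirroring the branches in step(); the helper name is illustrative:
def _multistep_combination_sketch(ets):
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24
# each coefficient set sums to 1, so a constant history is reproduced exactly
assert _multistep_combination_sketch([1.0, 1.0, 1.0, 1.0]) == 1.0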
| 297 | 1 |
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {
'''7z''': (seven_zip_file, SevenZipExtractor),
'''bz2''': (bza_file, BzipaExtractor),
'''gzip''': (gz_file, GzipExtractor),
'''lz4''': (lza_file, LzaExtractor),
'''tar''': (tar_file, TarExtractor),
'''xz''': (xz_file, XzExtractor),
'''zip''': (zip_file, ZipExtractor),
'''zstd''': (zstd_file, ZstdExtractor),
}
_lowerCAmelCase : Optional[int] = input_paths_and_base_extractors[compression_format]
if input_path is None:
_lowerCAmelCase : List[Any] = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowerCamelCase )
assert base_extractor.is_extractable(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
base_extractor.extract(_lowerCamelCase , _lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowerCAmelCase : Optional[Any] = file_path.read_text(encoding="utf-8" )
else:
_lowerCAmelCase : Optional[int] = output_path.read_text(encoding="utf-8" )
_lowerCAmelCase : List[str] = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"compression_format, is_archive" , [
("7z", True),
("bz2", False),
("gzip", False),
("lz4", False),
("tar", True),
("xz", False),
("zip", True),
("zstd", False),
] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = {
'''7z''': seven_zip_file,
'''bz2''': bza_file,
'''gzip''': gz_file,
'''lz4''': lza_file,
'''tar''': tar_file,
'''xz''': xz_file,
'''zip''': zip_file,
'''zstd''': zstd_file,
}
_lowerCAmelCase : Optional[Any] = input_paths[compression_format]
if input_path is None:
_lowerCAmelCase : Tuple = F"for '{compression_format}' compression_format, "
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = Extractor.infer_extractor_format(_lowerCamelCase )
assert extractor_format is not None
_lowerCAmelCase : Tuple = tmp_path / ('''extracted''' if is_archive else '''extracted.txt''')
Extractor.extract(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_lowerCAmelCase : Union[str, Any] = file_path.read_text(encoding="utf-8" )
else:
_lowerCAmelCase : str = output_path.read_text(encoding="utf-8" )
_lowerCAmelCase : Any = text_file.read_text(encoding="utf-8" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
import tarfile
_lowerCAmelCase : str = tmp_path / '''data_dot_dot'''
directory.mkdir()
_lowerCAmelCase : Any = directory / '''tar_file_with_dot_dot.tar'''
with tarfile.TarFile(_lowerCamelCase , "w" ) as f:
f.add(_lowerCamelCase , arcname=os.path.join(".." , text_file.name ) )
return path
@pytest.fixture
def A ( _lowerCamelCase ):
'''simple docstring'''
import tarfile
_lowerCAmelCase : Optional[int] = tmp_path / '''data_sym_link'''
directory.mkdir()
_lowerCAmelCase : str = directory / '''tar_file_with_sym_link.tar'''
os.symlink(".." , directory / "subdir" , target_is_directory=_lowerCamelCase )
with tarfile.TarFile(_lowerCamelCase , "w" ) as f:
f.add(str(directory / "subdir" ) , arcname="subdir" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"insecure_tar_file, error_log" , [("tar_file_with_dot_dot", "illegal path"), ("tar_file_with_sym_link", "Symlink")] , )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {
'''tar_file_with_dot_dot''': tar_file_with_dot_dot,
'''tar_file_with_sym_link''': tar_file_with_sym_link,
}
_lowerCAmelCase : int = insecure_tar_files[insecure_tar_file]
_lowerCAmelCase : List[str] = tmp_path / '''extracted'''
TarExtractor.extract(_lowerCamelCase , _lowerCamelCase )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = tmpdir / '''not_a_zip_file'''
# From: https://github.com/python/cpython/pull/5053
_lowerCAmelCase : Tuple = (
B'''\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00'''
B'''\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6\'\x00\x00\x00\x15I'''
B'''DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07'''
B'''\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82'''
)
with not_a_zip_file.open("wb" ) as f:
f.write(_lowerCamelCase )
assert zipfile.is_zipfile(str(_lowerCamelCase ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(_lowerCamelCase ) # but we're right
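# The false positive above comes from how zipfile sniffs archives: is_zipfile()
# looks for the end-of-central-directory signature near the end of the file, so
# a PNG whose payload happens to contain "PK\x05\x06" passes the check. A
# stricter illustrative sketch checks that the file *starts* with a ZIP
# signature (fine for ordinary archives, though not for self-extracting ones):
def _starts_like_zip(path) -> bool:
    with open(path, "rb") as f:
        return f.read(4) in (b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08")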
| 36 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __magic_name__ ( unittest.TestCase):
def UpperCAmelCase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()
def UpperCAmelCase__ ( self : int , **lowerCamelCase__ : List[str] ) -> List[Any]:
'''simple docstring'''
return TvltImageProcessor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : Tuple ) -> List[Any]:
'''simple docstring'''
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **lowerCamelCase__ )
def UpperCAmelCase__ ( self : str ) -> Tuple:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ) -> int:
'''simple docstring'''
UpperCamelCase__ : int = self.get_image_processor()
UpperCamelCase__ : Union[str, Any] = self.get_feature_extractor()
UpperCamelCase__ : List[str] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ : Optional[int] = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , lowerCamelCase__ )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ : str = self.get_image_processor()
UpperCamelCase__ : List[Any] = self.get_feature_extractor()
UpperCamelCase__ : Dict = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : Any = np.ones([12000] )
UpperCamelCase__ : Union[str, Any] = feature_extractor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : Any = processor(audio=lowerCamelCase__ , return_tensors='''np''' )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ : List[Any] = self.get_image_processor()
UpperCamelCase__ : Any = self.get_feature_extractor()
UpperCamelCase__ : int = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : int = np.ones([3, 224, 224] )
UpperCamelCase__ : List[str] = image_processor(lowerCamelCase__ , return_tensors='''np''' )
UpperCamelCase__ : str = processor(images=lowerCamelCase__ , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ : Any = self.get_image_processor()
UpperCamelCase__ : Dict = self.get_feature_extractor()
UpperCamelCase__ : Union[str, Any] = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
UpperCamelCase__ : List[str] = np.ones([12000] )
UpperCamelCase__ : Tuple = np.ones([3, 224, 224] )
UpperCamelCase__ : Optional[Any] = processor(audio=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def UpperCAmelCase__ ( self : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ : List[str] = self.get_image_processor()
UpperCamelCase__ : str = self.get_feature_extractor()
UpperCamelCase__ : Tuple = TvltProcessor(image_processor=lowerCamelCase__ , feature_extractor=lowerCamelCase__ )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 146 | 0 |
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Least-significant-digit radix sort for non-negative integers."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = (i // placement) % RADIX
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
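# usage check for radix_sort above (non-negative integers only, since buckets
# are indexed by base-10 digits; the example values are illustrative)
assert radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) == [2, 24, 45, 66, 75, 90, 170, 802]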
| 247 |
from __future__ import annotations
from collections import deque
def tarjan(g: list[list[int]]) -> list[list[int]]:
    """Return the strongly connected components of a directed graph (adjacency
    list), in reverse topological order, using Tarjan's algorithm."""
    n = len(g)
    stack: deque[int] = deque()
    on_stack = [False for _ in range(n)]
    index_of = [-1 for _ in range(n)]
    lowlink_of = index_of[:]
    def strong_connect(v: int, index: int, components: list[list[int]]) -> int:
        index_of[v] = index  # the number when this node is seen
        lowlink_of[v] = index  # lowest rank node reachable from here
        index += 1
        stack.append(v)
        on_stack[v] = True
        for w in g[v]:
            if index_of[w] == -1:
                index = strong_connect(w, index, components)
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
            elif on_stack[w]:
                lowlink_of[v] = (
                    lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
                )
        if lowlink_of[v] == index_of[v]:
            component = []
            w = stack.pop()
            on_stack[w] = False
            component.append(w)
            while w != v:
                w = stack.pop()
                on_stack[w] = False
                component.append(w)
            components.append(component)
        return index
    components: list[list[int]] = []
    for v in range(n):
        if index_of[v] == -1:
            strong_connect(v, 0, components)
    return components
def create_graph(n: int, edges: list[tuple[int, int]]) -> list[list[int]]:
    g: list[list[int]] = [[] for _ in range(n)]
    for u, v in edges:
        g[u].append(v)
    return g
if __name__ == "__main__":
    # Test
    n_vertices = 7
    source = [0, 0, 1, 2, 3, 3, 4, 4, 6]
    target = [1, 3, 2, 0, 1, 4, 5, 6, 5]
    edges = [(u, v) for u, v in zip(source, target)]
    g = create_graph(n_vertices, edges)
    assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
| 247 | 1 |
# Imports
import numpy as np
class A__ :
"""simple docstring"""
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None) -> Dict:
'''simple docstring'''
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase)
def __lowercase ( self , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None) -> List[Any]:
'''simple docstring'''
if red is not None:
a__ : List[Any] = red
if green is not None:
a__ : List[Any] = green
if blue is not None:
a__ : Optional[int] = blue
if red_edge is not None:
a__ : Dict = red_edge
if nir is not None:
a__ : Optional[int] = nir
return True
def __lowercase ( self , lowercase="" , lowercase=None , lowercase=None , lowercase=None , lowercase=None , lowercase=None) -> List[Any]:
'''simple docstring'''
self.set_matricies(red=lowercase , green=lowercase , blue=lowercase , red_edge=lowercase , nir=lowercase)
a__ : Dict = {
'ARVI2': self.arvaa,
'CCCI': self.ccci,
'CVI': self.cvi,
'GLI': self.gli,
'NDVI': self.ndvi,
'BNDVI': self.bndvi,
'redEdgeNDVI': self.red_edge_ndvi,
'GNDVI': self.gndvi,
'GBNDVI': self.gbndvi,
'GRNDVI': self.grndvi,
'RBNDVI': self.rbndvi,
'PNDVI': self.pndvi,
'ATSAVI': self.atsavi,
'BWDRVI': self.bwdrvi,
'CIgreen': self.ci_green,
'CIrededge': self.ci_rededge,
'CI': self.ci,
'CTVI': self.ctvi,
'GDVI': self.gdvi,
'EVI': self.evi,
'GEMI': self.gemi,
'GOSAVI': self.gosavi,
'GSAVI': self.gsavi,
'Hue': self.hue,
'IVI': self.ivi,
'IPVI': self.ipvi,
'I': self.i,
'RVI': self.rvi,
'MRVI': self.mrvi,
'MSAVI': self.m_savi,
'NormG': self.norm_g,
'NormNIR': self.norm_nir,
'NormR': self.norm_r,
'NGRDI': self.ngrdi,
'RI': self.ri,
'S': self.s,
'IF': self._if,
'DVI': self.dvi,
'TVI': self.tvi,
'NDRE': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('Index not in the list!')
return False
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def __lowercase ( self) -> Any:
'''simple docstring'''
return self.nir * (self.red / (self.green**2))
def __lowercase ( self) -> Any:
'''simple docstring'''
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def __lowercase ( self) -> Dict:
'''simple docstring'''
return (self.nir - self.red) / (self.nir + self.red)
def __lowercase ( self) -> Dict:
'''simple docstring'''
return (self.nir - self.blue) / (self.nir + self.blue)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.redEdge - self.red) / (self.redEdge + self.red)
def __lowercase ( self) -> int:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green)
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def __lowercase ( self , lowercase=0.08 , lowercase=1.22 , lowercase=0.03) -> Optional[Any]:
'''simple docstring'''
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def __lowercase ( self) -> Dict:
'''simple docstring'''
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.nir / self.green) - 1
def __lowercase ( self) -> int:
'''simple docstring'''
return (self.nir / self.redEdge) - 1
def __lowercase ( self) -> Tuple:
'''simple docstring'''
return (self.red - self.blue) / self.red
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : List[str] = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))
def __lowercase ( self) -> List[str]:
'''simple docstring'''
return self.nir - self.green
def __lowercase ( self) -> Dict:
'''simple docstring'''
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ : Dict = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.1_25) / (1 - self.red)
def __lowercase ( self , lowercase=0.16) -> List[Any]:
'''simple docstring'''
return (self.nir - self.green) / (self.nir + self.green + y)
def __lowercase ( self , lowercase=0.5) -> str:
'''simple docstring'''
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))
def __lowercase ( self , lowercase=None , lowercase=None) -> Optional[int]:
'''simple docstring'''
return (self.nir - b) / (a * self.red)
def __lowercase ( self) -> str:
'''simple docstring'''
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return (self.red + self.green + self.blue) / 30.5
def rvi( self) -> List[str]:
'''simple docstring'''
return self.nir / self.red
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return (self.rvi() - 1) / (self.rvi() + 1)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def __lowercase ( self) -> int:
'''simple docstring'''
return self.green / (self.nir + self.red + self.green)
def __lowercase ( self) -> str:
'''simple docstring'''
return self.nir / (self.nir + self.red + self.green)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
return self.red / (self.nir + self.red + self.green)
def __lowercase ( self) -> Optional[int]:
'''simple docstring'''
return (self.green - self.red) / (self.green + self.red)
def __lowercase ( self) -> str:
'''simple docstring'''
return (self.red - self.green) / (self.red + self.green)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
return (max_value - min_value) / max_value
def __lowercase ( self) -> int:
'''simple docstring'''
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def __lowercase ( self) -> Any:
'''simple docstring'''
return self.nir / self.red
def __lowercase ( self) -> Optional[Any]:
'''simple docstring'''
return (self.ndvi() + 0.5) ** (1 / 2)
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
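# A minimal usage sketch of the band-ratio indices above: the formulas
# operate elementwise on reflectance arrays, so e.g. NDVI for a whole image
# can be computed directly with numpy (the band values here are made-up
# illustration data, not from this module):
# nir = np.array([[0.8, 0.7], [0.6, 0.9]])
# red = np.array([[0.1, 0.2], [0.3, 0.1]])
# ndvi_map = (nir - red) / (nir + red)  # same formula as the ndvi method above
# values near 1 indicate dense, healthy vegetation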
| 99 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path: str , gpta_config_file: str , pytorch_dump_folder_path: str ) -> Any:
# Construct model
if gpta_config_file == "":
config = GPTaConfig()
else:
config = GPTaConfig.from_json_file(gpta_config_file )
model = GPTaModel(config )
# Load weights from numpy
load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
# Save pytorch-model
pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , pytorch_weights_dump_path )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
args = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
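# Hedged usage sketch: the converter can also be driven programmatically,
# mirroring the CLI above; the paths are placeholders, not real checkpoints:
# convert_gpta_checkpoint_to_pytorch(
#     gpta_checkpoint_path="path/to/tf_checkpoint",
#     gpta_config_file="",  # empty string falls back to the default GPTaConfig
#     pytorch_dump_folder_path="path/to/output_dir",
# )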
| 192 | 0 |
"""simple docstring"""
import operator as op
def solve(post_fix ):
stack = []
div = lambda x, y: int(x / y ) # noqa: E731 integer division operation
opr = {
'''^''': op.pow,
'''*''': op.mul,
'''/''': div,
'''+''': op.add,
'''-''': op.sub,
} # operators & their respective operation
# print table header
print('''Symbol'''.center(8 ) , '''Action'''.center(12 ) , '''Stack''' , sep=''' | ''' )
print('''-''' * (30 + len(post_fix )) )
for x in post_fix:
if x.isdigit(): # if x is a digit
stack.append(x ) # append x to stack
# output in tabular format
print(x.rjust(8 ) , ('''push(''' + x + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
else:
b = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
a = stack.pop() # pop stack
# output in tabular format
print(''''''.rjust(8 ) , ('''pop(''' + a + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' )
stack.append(
str(opr[x](int(a ) , int(b ) ) ) ) # evaluate the 2 values popped from stack & push result to stack
# output in tabular format
print(
x.rjust(8 ) , ('''push(''' + a + x + b + ''')''').ljust(12 ) , ''','''.join(stack ) , sep=''' | ''' , )
return int(stack[0] )
if __name__ == "__main__":
Postfix = input('\n\nEnter a Postfix Equation (space separated) = ').split(' ')
print('\n\tResult = ', solve(Postfix))
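# Worked example, using the operators defined in opr above. For the postfix
# expression "5 6 9 * +" (infix 5 + 6 * 9) the trace pushes 5, 6 and 9,
# pops 9 and 6 for '*', pushes 54, then pops 54 and 5 for '+':
#     print(solve('5 6 9 * +'.split(' ')))  # -> 59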
| 40 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase__ = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
def __init__( self : str , *_lowerCamelCase : Tuple , **_lowerCamelCase : Optional[int] ):
warnings.warn(
'''The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use MobileViTImageProcessor instead.''' , FutureWarning , )
super().__init__(*_lowerCamelCase , **_lowerCamelCase )
| 40 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class SemanticStableDiffusionPipelineOutput( BaseOutput ):
images : Union[List[PIL.Image.Image], np.ndarray]
nsfw_content_detected : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 67 | '''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
@staticmethod
def _should_log( main_process_only : Optional[Any] ):
"""simple docstring"""
state = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def log( self : int , level : Optional[int] , msg : str , *args : Optional[int] , **kwargs : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
main_process_only = kwargs.pop('''main_process_only''' , True )
in_order = kwargs.pop('''in_order''' , False )
if self.isEnabledFor(level ):
if self._should_log(main_process_only ):
msg , kwargs = self.process(msg , kwargs )
self.logger.log(level , msg , *args , **kwargs )
elif in_order:
state = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
msg , kwargs = self.process(msg , kwargs )
self.logger.log(level , msg , *args , **kwargs )
state.wait_for_everyone()
def get_logger( name , log_level = None ) -> Optional[int]:
if log_level is None:
log_level = os.environ.get('''ACCELERATE_LOG_LEVEL''' , log_level )
logger = logging.getLogger(name )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(logger , {} )
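# Minimal usage sketch (assumes the accelerate state has been initialized,
# e.g. by constructing an Accelerator or PartialState first):
# logger = get_logger(__name__, log_level="INFO")
# logger.info("printed once, on the main process only")
# logger.info("printed by every process, in order", main_process_only=False, in_order=True)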
| 67 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration
REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None
UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"
REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case: Union[str, Any] ) -> Optional[int]:
@wraps(test_case )
def wrapper(self : Tuple , metric_name : int ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest('''"test requires Fairseq"''' )
else:
test_case(self , metric_name )
return wrapper
def skip_if_metric_requires_transformers(test_case: Dict ) -> List[Any]:
@wraps(test_case )
def wrapper(self : Tuple , metric_name : Tuple ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest('''"test requires transformers"''' )
else:
test_case(self , metric_name )
return wrapper
def skip_on_windows(test_case: Union[str, Any] ) -> Union[str, Any]:
@wraps(test_case )
def wrapper(self : Optional[int] , metric_name : Any ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest('''"test not supported on Windows"''' )
else:
test_case(self , metric_name )
return wrapper
def get_local_metric_names() -> str:
metrics = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob('''./metrics/*/''' )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
skip_if_metric_requires_fairseq , skip_if_metric_requires_transformers , skip_on_windows )
@local
class LocalMetricTest( parameterized.TestCase ):
INTENSIVE_CALLS_PATCHER : str = {}
metric_name : str = None
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:load_metric is deprecated:FutureWarning''' )
def test_load_metric( self : str , metric_name : str ) -> Tuple:
doctest.ELLIPSIS_MARKER = '''[...]'''
metric_module = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
metric = datasets.load.import_main_class(metric_module.__name__ , dataset=False )
# check parameters
parameters = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(metric_name , metric_module.__name__ ):
with self.use_local_metrics():
try:
results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def test_load_real_metric( self : Dict , metric_name : int ) -> Optional[Any]:
doctest.ELLIPSIS_MARKER = '''[...]'''
metric_module = importlib.import_module(
datasets.load.metric_module_factory(os.path.join('''metrics''' , metric_name ) ).module_path )
# run doctest
with self.use_local_metrics():
results = doctest.testmod(metric_module , verbose=True , raise_on_error=True )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def patch_intensive_calls( self : Union[str, Any] , metric_name : List[str] , module_name : Any ) -> str:
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name ):
yield
else:
yield
@contextmanager
def use_local_metrics( self : Optional[Any] ) -> Optional[Any]:
def load_local_metric(metric_name : Union[str, Any] , *args : Dict , **kwargs : Union[str, Any] ):
return load_metric(os.path.join('''metrics''' , metric_name ) , *args , **kwargs )
with patch('''datasets.load_metric''' ) as mock_load_metric:
mock_load_metric.side_effect = load_local_metric
yield
@classmethod
def register_intensive_calls_patcher( cls : Dict , metric_name : Dict ) -> Tuple:
def wrapper(patcher : List[Any] ):
patcher = contextmanager(patcher )
cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher('''bleurt''' )
def patch_bleurt(module_name: int ) -> Optional[Any]:
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string('''sv''' , '''''' , '''''' ) # handle pytest cli flags
class MockedPredictor( Predictor ):
def predict( self : int , input_dict : Tuple ) -> List[Any]:
assert len(input_dict['''input_ids'''] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch('''bleurt.score._create_predictor''' ) as mock_create_predictor:
mock_create_predictor.return_value = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher('''bertscore''' )
def patch_bertscore(module_name: List[str] ) -> List[str]:
import torch
def bert_cos_score_idf(model: Dict , refs: int , *args: List[str] , **kwargs: Optional[int] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(refs ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch('''bert_score.scorer.get_model''' ), patch(
'''bert_score.scorer.bert_cos_score_idf''' ) as mock_bert_cos_score_idf:
mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher('''comet''' )
def patch_comet(module_name: List[Any] ) -> Optional[Any]:
def load_from_checkpoint(checkpoint_path: Optional[int] ):
class Model:
def predict( self : Dict , data : int , *args : Dict , **kwargs : List[Any] ) -> Tuple:
assert len(data ) == 2
scores = [0.19, 0.92]
return scores, sum(scores ) / len(scores )
return Model()
# mock download_model which is supposed to download a comet model
# mock load_from_checkpoint which is supposed to load the downloaded model
with patch('''comet.download_model''' ) as mock_download_model:
mock_download_model.return_value = None
with patch('''comet.load_from_checkpoint''' ) as mock_load_from_checkpoint:
mock_load_from_checkpoint.side_effect = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme() -> Union[str, Any]:
metric = load_metric(os.path.join('''metrics''' , '''seqeval''' ) )
wrong_scheme = '''ERROR'''
error_message = f'''Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}'''
with pytest.raises(ValueError , match=re.escape(error_message ) ):
metric.compute(predictions=[] , references=[] , scheme=wrong_scheme )
| 339 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
SCREAMING_SNAKE_CASE__ : str = ""
SCREAMING_SNAKE_CASE__ : Any = ""
SCREAMING_SNAKE_CASE__ : Optional[Any] = ""
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1 # (0 is vertical, 1 is horizontal)
def main() -> None:
img_paths , annos = get_dataset(LABEL_DIR , IMAGE_DIR )
print('''Processing...''' )
new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
for index, image in enumerate(new_images ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
letter_code = random_chars(32 )
file_name = paths[index].split(os.sep )[-1].rsplit('''.''' , 1 )[0]
file_root = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cva.imwrite(f'''/{file_root}.jpg''' , image , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(f'''Success {index+1}/{len(new_images )} with {file_name}''' )
annos_list = []
for anno in new_annos[index]:
obj = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(obj )
with open(f'''/{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def get_dataset(label_dir: str , img_dir: str ) -> tuple[list, list]:
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir , '''*.txt''' ) ):
label_name = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(label_file ) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir , f'''{label_name}.jpg''' )
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip('''\n''' ).split(''' ''' )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(img_path )
labels.append(boxes )
return img_paths, labels
def update_image_and_anno(img_list: list , anno_list: list , flip_type: int = 1 ) -> tuple[list, list, list]:
new_imgs_list = []
new_annos_lists = []
path_list = []
for idx in range(len(img_list ) ):
new_annos = []
path = img_list[idx]
path_list.append(path )
img_annos = anno_list[idx]
img = cva.imread(path )
if flip_type == 1:
img = cva.flip(img , flip_type )
for bbox in img_annos:
x_center_new = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
img = cva.flip(img , flip_type )
for bbox in img_annos:
y_center_new = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(new_annos )
new_imgs_list.append(img )
return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("DONE ✅")
| 339 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = '''Run commands across TPU VMs for initial setup before running `accelerate launch`.'''
def tpu_command_parser( subparsers : Optional[int]=None ) -> List[str]:
'''simple docstring'''
if subparsers is not None:
parser = subparsers.add_parser("tpu-config" , description=_description )
else:
parser = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
config_args = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=str , default=None , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=None , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=None , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
pod_args = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=None , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
if subparsers is not None:
parser.set_defaults(func=tpu_command_launcher )
return parser
def tpu_command_launcher( args : str ) -> Union[str, Any]:
'''simple docstring'''
defaults = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(default_config_file ):
defaults = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
args.command_file = defaults.command_file
if not args.command and defaults.commands is not None:
args.command = defaults.commands
if not args.tpu_name:
args.tpu_name = defaults.tpu_name
if not args.tpu_zone:
args.tpu_zone = defaults.tpu_zone
if args.accelerate_version == "dev":
args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
elif args.accelerate_version == "latest":
args.accelerate_version = "accelerate -U"
elif isinstance(parse(args.accelerate_version ) , Version ):
args.accelerate_version = f'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError("You must specify either a command file or a command to run on the pod." )
if args.command_file:
with open(args.command_file , "r" ) as f:
args.command = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , list ):
args.command = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
new_cmd = ["cd /usr/share"]
if args.install_accelerate:
new_cmd += [f'pip install {args.accelerate_version}']
new_cmd += args.command
args.command = "; ".join(new_cmd )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
cmd = ["gcloud"]
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(f'Running {" ".join(cmd )}' )
return
subprocess.run(cmd )
print("Successfully setup pod." )
def main() -> str:
'''simple docstring'''
parser = tpu_command_parser()
args = parser.parse_args()
tpu_command_launcher(args )
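# Hedged usage sketch: with --debug the launcher only prints the gcloud
# command instead of executing it. The TPU name and zone are placeholders,
# and this assumes an accelerate config file is available for the defaults:
# parser = tpu_command_parser()
# args = parser.parse_args(
#     ["--tpu_name", "my-tpu", "--tpu_zone", "us-central1-a", "--command", "echo hello", "--debug"]
# )
# tpu_command_launcher(args)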
| 22 | # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool( PipelineTool ):
default_checkpoint = "Salesforce/blip-image-captioning-base"
description = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
name = "image_captioner"
model_class = AutoModelForVisionaSeq
inputs = ["image"]
outputs = ["text"]
def __init__( self : List[Any] , *args : Optional[int] , **kwargs : Union[str, Any] ):
"""simple docstring"""
requires_backends(self , ["""vision"""] )
super().__init__(*args , **kwargs )
def __a ( self : Tuple , _lowercase : "Image" ):
"""simple docstring"""
return self.pre_processor(images=_lowercase , return_tensors="""pt""" )
def __a ( self : Union[str, Any] , _lowercase : Optional[int] ):
"""simple docstring"""
return self.model.generate(**_lowercase )
def __a ( self : int , _lowercase : Any ):
"""simple docstring"""
return self.pre_processor.batch_decode(_lowercase , skip_special_tokens=_lowercase )[0].strip()
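# Hedged usage sketch (downloads the BLIP checkpoint on first call; the
# image path is a placeholder). PipelineTool instances are callable, which
# runs encode, forward and decode in sequence:
# from PIL import Image
# tool = ImageCaptioningTool()
# caption = tool(Image.open("path/to/photo.jpg"))
# print(caption)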
| 219 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCamelCase = logging.get_logger(__name__)
def create_rename_keys(config , base_model=False ) -> List[Any]:
"""simple docstring"""
rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
("""pre_logits.fc.weight""", """pooler.dense.weight"""),
("""pre_logits.fc.bias""", """pooler.dense.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict , config , base_model=False ) -> Optional[int]:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
if base_model:
prefix = """"""
else:
prefix = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
: config.hidden_size, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
-config.hidden_size :, :
]
state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict ) -> Union[str, Any]:
"""simple docstring"""
ignore_keys = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(k , None )
def rename_key(dct , old , new ) -> Tuple:
"""simple docstring"""
val = dct.pop(old )
dct[new] = val
def prepare_img() -> Dict:
"""simple docstring"""
url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
im = Image.open(requests.get(url , stream=True ).raw )
return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name , pytorch_dump_folder_path ) -> Union[str, Any]:
"""simple docstring"""
config = ViTConfig()
base_model = False
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
if vit_name[-5:] == "in21k":
base_model = True
config.patch_size = int(vit_name[-12:-10] )
config.image_size = int(vit_name[-9:-6] )
else:
config.num_labels = 1000
repo_id = """huggingface/label-files"""
filename = """imagenet-1k-id2label.json"""
idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
idalabel = {int(k ): v for k, v in idalabel.items()}
config.idalabel = idalabel
config.labelaid = {v: k for k, v in idalabel.items()}
config.patch_size = int(vit_name[-6:-4] )
config.image_size = int(vit_name[-3:] )
# size of the architecture
if "deit" in vit_name:
if vit_name[9:].startswith("""tiny""" ):
config.hidden_size = 192
config.intermediate_size = 768
config.num_hidden_layers = 12
config.num_attention_heads = 3
elif vit_name[9:].startswith("""small""" ):
config.hidden_size = 384
config.intermediate_size = 1536
config.num_hidden_layers = 12
config.num_attention_heads = 6
else:
pass
else:
if vit_name[4:].startswith("""small""" ):
config.hidden_size = 768
config.intermediate_size = 2304
config.num_attention_heads = 8
config.num_hidden_layers = 8
elif vit_name[4:].startswith("""base""" ):
pass
elif vit_name[4:].startswith("""large""" ):
config.hidden_size = 1024
config.intermediate_size = 4096
config.num_hidden_layers = 24
config.num_attention_heads = 16
elif vit_name[4:].startswith("""huge""" ):
config.hidden_size = 1280
config.intermediate_size = 5120
config.num_hidden_layers = 32
config.num_attention_heads = 16
# load original model from timm
timm_model = timm.create_model(vit_name , pretrained=True )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
state_dict = timm_model.state_dict()
if base_model:
remove_classification_head_(state_dict )
rename_keys = create_rename_keys(config , base_model )
for src, dest in rename_keys:
rename_key(state_dict , src , dest )
read_in_q_k_v(state_dict , config , base_model )
# load HuggingFace model
if vit_name[-5:] == "in21k":
model = ViTModel(config ).eval()
else:
model = ViTForImageClassification(config ).eval()
model.load_state_dict(state_dict )
# Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
if "deit" in vit_name:
image_processor = DeiTImageProcessor(size=config.image_size )
else:
image_processor = ViTImageProcessor(size=config.image_size )
encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
pixel_values = encoding["""pixel_values"""]
outputs = model(pixel_values )
if base_model:
timm_pooled_output = timm_model.forward_features(pixel_values )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(timm_pooled_output , outputs.pooler_output , atol=1E-3 )
else:
timm_logits = timm_model(pixel_values )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(timm_logits , outputs.logits , atol=1E-3 )
Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(pytorch_dump_folder_path )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--vit_name",
default="vit_base_patch16_224",
type=str,
help="Name of the ViT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
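# Hedged usage sketch, equivalent to the CLI above (requires the timm
# weights to be downloadable; the output directory is a placeholder):
# convert_vit_checkpoint("vit_base_patch16_224", "./vit-base-patch16-224")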
| 13 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( PipelineTesterMixin , unittest.TestCase ):
pipeline_class = KandinskyImgaImgPipeline
params = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
batch_params = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
required_optional_params = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return 32
@property
def time_input_dim( self : str ) -> str:
"""simple docstring"""
return 32
@property
def block_out_channels_a( self : Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def time_embed_dim( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def cross_attention_dim( self : Dict ) -> Optional[Any]:
"""simple docstring"""
return 1_00
@property
def dummy_tokenizer( self : List[str] ) -> List[str]:
"""simple docstring"""
tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def dummy_text_encoder( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
config = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
text_encoder = MultilingualCLIP(config )
text_encoder = text_encoder.eval()
return text_encoder
@property
def dummy_unet( self : Tuple ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
model = UNetaDConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs( self : str ) -> Dict:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self : Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components( self : Tuple ) -> str:
"""simple docstring"""
text_encoder = self.dummy_text_encoder
tokenizer = self.dummy_tokenizer
unet = self.dummy_unet
movq = self.dummy_movq
scheduler_kwargs = {
"""num_train_timesteps""": 10_00,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
scheduler = DDIMScheduler(**scheduler_kwargs )
components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def get_dummy_inputs( self : str , device : str , seed : Union[str, Any]=0 ) -> str:
"""simple docstring"""
image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
# create init_image
image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
init_image = Image.fromarray(np.uinta(image ) ).convert("""RGB""" ).resize((2_56, 2_56) )
if str(device ).startswith("""mps""" ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def test_kandinsky_img2img( self : int ) -> str:
"""simple docstring"""
device = """cpu"""
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def tearDown( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def test_kandinsky_img2img( self : Optional[int] ) -> str:
"""simple docstring"""
expected_image = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
init_image = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
prompt = """A red cartoon frog, 4k"""
pipe_prior = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(torch_device )
pipeline = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
pipeline = pipeline.to(torch_device )
pipeline.set_progress_bar_config(disable=None )
generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
image_emb , zero_image_emb = pipe_prior(
prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
output = pipeline(
prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type="""np""" , )
image = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(image , expected_image )
| 13 | 1 |
"""simple docstring"""
def heaps(arr: list ) -> list:
'''simple docstring'''
if len(arr ) <= 1:
return [tuple(arr )]
res = []
def generate(n: int , arr: list ):
c = [0] * n
res.append(tuple(arr ) )
i = 0
while i < n:
if c[i] < i:
if i % 2 == 0:
arr[i] , arr[0] = arr[0], arr[i]
else:
arr[i] , arr[c[i]] = arr[c[i]], arr[i]
res.append(tuple(arr ) )
c[i] += 1
i = 0
else:
c[i] = 0
i += 1
generate(len(arr ) , arr )
return res
if __name__ == "__main__":
user_input = input('Enter numbers separated by a comma:\n').strip()
arr = [int(item) for item in user_input.split(',')]
print(heaps(arr))
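# Illustrative check: Heap's algorithm emits all n! orderings, each produced
# from the previous one by a single swap:
# >>> heaps([1, 2, 3])
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]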
| 155 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_lilt'] = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase : Any = """examples/"""
REPLACE_PATTERNS = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
REPLACE_FILES = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
lowerCAmelCase : Optional[int] = """README.md"""
def update_version_in_file(fname , version , pattern ):
"""simple docstring"""
with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
code = f.read()
re_pattern , replace = REPLACE_PATTERNS[pattern]
replace = replace.replace('VERSION' , version )
code = re_pattern.sub(replace , code )
with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(code )
def update_version_in_examples(version ):
"""simple docstring"""
for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def global_version_update(version , patch=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname , version , pattern )
if not patch:
update_version_in_examples(version )
def clean_main_ref_in_model_list():
"""simple docstring"""
_start_prompt = '🤗 Transformers currently provides the following architectures'
_end_prompt = '1. Want to contribute a new model?'
with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
lines = f.readlines()
# Find the start of the list.
start_index = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
index = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
lines[index] = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines )
def get_version():
"""simple docstring"""
with open(REPLACE_FILES['init'] , 'r' ) as f:
code = f.read()
default_version = REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
return packaging.version.parse(default_version )
def pre_release_work(patch=False ):
"""simple docstring"""
default_version = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
default_version = default_version.base_version
elif patch:
default_version = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
default_version = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
version = input(f'Which version are you releasing? [{default_version}]' )
if len(version ) == 0:
version = default_version
print(f'Updating version to {version}.' )
global_version_update(version , patch=patch )
def post_release_work():
"""simple docstring"""
current_version = get_version()
dev_version = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
current_version = current_version.base_version
# Check with the user we got that right.
version = input(f'Which version are we developing now? [{dev_version}]' )
if len(version ) == 0:
version = dev_version
print(f'Updating version to {version}.' )
global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work()
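# Illustrative check of the version-bump regexes (runnable standalone; the
# sample line mimics src/diffusers/__init__.py):
# sample = '__version__ = "0.17.0.dev0"\n'
# re_pattern, replace = REPLACE_PATTERNS["init"]
# print(re_pattern.sub(replace.replace("VERSION", "0.17.0"), sample))
# -> __version__ = "0.17.0"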
| 366 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCamelCase__ ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
pipeline_class = KandinskyVaaInpaintPipeline
__magic_name__ = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
batch_params = [
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
required_optional_params = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
test_xformers_attention = False
@property
def text_embedder_hidden_size( self ):
'''simple docstring'''
return 32
@property
def time_input_dim( self ):
'''simple docstring'''
return 32
@property
def block_out_channels_a( self ):
'''simple docstring'''
return self.time_input_dim
@property
def time_embed_dim( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def cross_attention_dim( self ):
'''simple docstring'''
return 100
@property
def dummy_unet( self ):
'''simple docstring'''
torch.manual_seed(0 )
model_kwargs = {
'in_channels': 9,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
model = UNetaDConditionModel(**model_kwargs )
return model
@property
def dummy_movq_kwargs( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def dummy_movq( self ):
'''simple docstring'''
torch.manual_seed(0 )
model = VQModel(**self.dummy_movq_kwargs )
return model
def get_dummy_components( self ):
'''simple docstring'''
unet = self.dummy_unet
movq = self.dummy_movq
scheduler = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='epsilon' , thresholding=False , )
components = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def get_dummy_inputs( self , device , seed=0 ):
'''simple docstring'''
image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
device )
# create init_image
image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
init_image = Image.fromarray(np.uinta(image ) ).convert('RGB' ).resize((256, 256) )
# create mask
mask = np.ones((64, 64) , dtype=np.floataa )
mask[:32, :32] = 0
if str(device ).startswith('mps' ):
generator = torch.manual_seed(seed )
else:
generator = torch.Generator(device=device ).manual_seed(seed )
inputs = {
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def test_kandinsky_inpaint( self ):
'''simple docstring'''
device = 'cpu'
components = self.get_dummy_components()
pipe = self.pipeline_class(**components )
pipe = pipe.to(device )
pipe.set_progress_bar_config(disable=None )
output = pipe(**self.get_dummy_inputs(device ) )
image = output.images
image_from_tuple = pipe(
**self.get_dummy_inputs(device ) , return_dict=False , )[0]
image_slice = image[0, -3:, -3:, -1]
image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
print(F'image.shape {image.shape}' )
assert image.shape == (1, 64, 64, 3)
expected_slice = np.array(
[0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
def test_inference_batch_single_identical( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self ):
'''simple docstring'''
_lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' )
_lowerCAmelCase : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
_lowerCAmelCase : Dict = np.ones((768, 768) , dtype=np.floataa )
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : List[str] = 'a hat'
_lowerCAmelCase : Any = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
pipe_prior.to(snake_case__ )
_lowerCAmelCase : Union[str, Any] = KandinskyVaaInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.float16 )
_lowerCAmelCase : Optional[Any] = pipeline.to(snake_case__ )
pipeline.set_progress_bar_config(disable=snake_case__ )
_lowerCAmelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
snake_case__ , generator=snake_case__ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
_lowerCAmelCase : Optional[Any] = pipeline(
image=snake_case__ , mask_image=snake_case__ , image_embeds=snake_case__ , negative_image_embeds=snake_case__ , generator=snake_case__ , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
_lowerCAmelCase : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(snake_case__ , snake_case__ )
| 25 | 0 |
"""simple docstring"""
import cva
import numpy as np
class __A :
"""simple docstring"""
def __init__( self , __A , __A ) -> int:
if k in (0.04, 0.06):
a =k
a =window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self ) -> str:
return str(self.k )
def SCREAMING_SNAKE_CASE ( self , __A ) -> tuple[cva.Mat, list[list[int]]]:
a =cva.imread(__A , 0 )
a , a =img.shape
a =[]
a =img.copy()
a =cva.cvtColor(__A , cva.COLOR_GRAY2RGB )
a , a =np.gradient(__A )
a =dx**2
a =dy**2
a =dx * dy
a =0.04
a =self.window_size // 2
for y in range(__A , h - offset ):
for x in range(__A , w - offset ):
a =ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
a =(wxx * wyy) - (wxy**2)
a =wxx + wyy
a =det - k * (trace**2)
# Can change the value
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
lowerCamelCase_ : List[Any] = HarrisCorner(0.04, 3)
lowerCamelCase_ , lowerCamelCase_ : int = edge_detect.detect("""path_to_image""")
cva.imwrite("""detect.png""", color_img) | 81 |
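The Harris response used above, R = det(M) - k * trace(M)^2, can be sanity-checked on a tiny synthetic patch without any image I/O. A minimal sketch assuming only numpy; the 3x3 window and 0.04 constant mirror the defaults in the snippet:
import numpy as np

patch = np.zeros((9, 9))
patch[4:, 4:] = 255.0  # a bright quadrant puts an L-shaped corner near (4, 4)
dy, dx = np.gradient(patch)
ixx, iyy, ixy = dx**2, dy**2, dx * dy
y = x = 4
w = slice(y - 1, y + 2)  # 3x3 window, i.e. offset = window_size // 2 = 1
wxx, wyy, wxy = ixx[w, w].sum(), iyy[w, w].sum(), ixy[w, w].sum()
r = (wxx * wyy - wxy**2) - 0.04 * (wxx + wyy) ** 2
print(r > 0.5)  # True: the corner pixel passes the threshold used in detect()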
'''simple docstring'''
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    """Load a serialized Flax checkpoint file into a PyTorch model."""
    try:
        with open(model_file, "rb") as flax_state_f:
            # a `None` target makes flax return the raw deserialized state dict
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned.")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    """Load a Flax parameter tree into a PyTorch model, renaming keys as needed."""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model.")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    pt_model.base_model_prefix = ""
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")
        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )
        flax_key = ".".join(flax_key_tuple_array)
        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.")
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)
    pt_model.load_state_dict(pt_model_dict)
    # re-transform missing_keys to list
    missing_keys = list(missing_keys)
    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model).")
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference.")
    return pt_model
| 37 | 0 |
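A hedged usage sketch for the loader above. The checkpoint filename and the tiny PyTorch module are placeholders, not artifacts from this file; the only real requirement is that the PyTorch state dict keys line up with the renamed Flax parameter tree:
import torch.nn as nn

class TinyModel(nn.Module):  # hypothetical stand-in for a real model class
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(4, 4)  # must mirror the Flax module's layout

pt_model = TinyModel()
# "flax_model.msgpack" is a placeholder path to a serialized Flax checkpoint
pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
pt_model.eval()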
'''simple docstring'''
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Map each lowercase letter to its position in the alphabet ('a' -> 1)."""
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))
if __name__ == "__main__":
    main()
| 236 |
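A quick round-trip check for the two helpers above ('a' maps to 1, ..., 'z' to 26):
assert encode("hello") == [8, 5, 12, 12, 15]
assert decode([8, 5, 12, 12, 15]) == "hello"
assert decode(encode("zebra")) == "zebra"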
'''simple docstring'''
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 236 | 1 |
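A worked example for the function above: a 2x2 Doolittle factorization (unit diagonal on L), verified by multiplying the factors back together:
import numpy as np

matrix = np.array([[2.0, -2.0], [-2.0, 6.0]])
lower, upper = lower_upper_decomposition(matrix)
# lower = [[1, 0], [-1, 1]], upper = [[2, -2], [0, 4]]
assert np.allclose(lower @ upper, matrix)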
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
| 184 |
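A hedged usage sketch for the processor above; the hub checkpoint name is an assumption about an available ViLT repo:
from PIL import Image
from transformers import ViltProcessor

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
image = Image.new("RGB", (384, 384))
encoding = processor(image, "How many cats are there?", return_tensors="pt")
# input_ids/attention_mask come from the tokenizer; pixel_values/pixel_mask from the image processor
print(sorted(encoding.keys()))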
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    """simple docstring"""
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"
    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so the updated schema is written through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 184 | 1 |
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase_ ( DiffusionPipeline ):
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
snake_case_ = (
F'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
F''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
"to update the config accordingly as leaving `steps_offset` might led to incorrect results"
" in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
" it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
" file"
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
snake_case_ = dict(scheduler.config )
snake_case_ = 1
snake_case_ = FrozenDict(snake_case__ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
snake_case_ = (
F'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
" `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
" sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
" incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
" Hub, it would be very nice if you could open a Pull request for the"
" `scheduler/scheduler_config.json` file"
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , snake_case__ , standard_warn=snake_case__ )
snake_case_ = dict(scheduler.config )
snake_case_ = True
snake_case_ = FrozenDict(snake_case__ )
if safety_checker is None:
logger.warning(
F'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=snake_case__ , segmentation_processor=snake_case__ , vae=snake_case__ , text_encoder=snake_case__ , tokenizer=snake_case__ , unet=snake_case__ , scheduler=snake_case__ , safety_checker=snake_case__ , feature_extractor=snake_case__ , )
def UpperCamelCase__ ( self , _UpperCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(snake_case__ )
def UpperCamelCase__ ( self ):
self.enable_attention_slicing(snake_case__ )
def UpperCamelCase__ ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
snake_case_ = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(snake_case__ , snake_case__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCamelCase__ ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = 5_12 , _UpperCAmelCase = 5_12 , _UpperCAmelCase = 50 , _UpperCAmelCase = 7.5 , _UpperCAmelCase = None , _UpperCAmelCase = 1 , _UpperCAmelCase = 0.0 , _UpperCAmelCase = None , _UpperCAmelCase = None , _UpperCAmelCase = "pil" , _UpperCAmelCase = True , _UpperCAmelCase = None , _UpperCAmelCase = 1 , **_UpperCAmelCase , ):
snake_case_ = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
snake_case_ = self.segmentation_model(**snake_case__ )
snake_case_ = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
snake_case_ = self.numpy_to_pil(snake_case__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
snake_case_ = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=snake_case__ , image=snake_case__ , mask_image=snake_case__ , height=snake_case__ , width=snake_case__ , num_inference_steps=snake_case__ , guidance_scale=snake_case__ , negative_prompt=snake_case__ , num_images_per_prompt=snake_case__ , eta=snake_case__ , generator=snake_case__ , latents=snake_case__ , output_type=snake_case__ , return_dict=snake_case__ , callback=snake_case__ , callback_steps=snake_case__ , ) | 360 |
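A hedged usage sketch for the pipeline above, modeled on the diffusers community "text_inpainting" example; the two checkpoint names and the local image path are assumptions, not facts from this file:
import torch
from PIL import Image
from diffusers import DiffusionPipeline
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor

seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=seg_model,
    segmentation_processor=seg_processor,
    torch_dtype=torch.float16,
).to("cuda")
image = Image.open("room.png").resize((512, 512))  # placeholder input image
# CLIPSeg masks the region described by `text`; the inpainter repaints it as `prompt`
result = pipe(prompt="a cup", image=image, text="a glass").images[0]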
import numpy as np
import datasets
UpperCAmelCase = """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
UpperCAmelCase = """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
UpperCAmelCase = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }), )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)
        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension")
        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
        return {"mahalanobis": mahal_dist} | 267 | 0 |
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Return (x, y) with a*x + b*y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Return the unique x modulo n1*n2 with x = r1 (mod n1) and x = r2 (mod n2)."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    """Return the multiplicative inverse of a modulo n."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same result as above, built from modular inverses instead."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod
    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
| 110 |
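A worked example for the helpers above: the unique x modulo 3*5 = 15 with x = 2 (mod 3) and x = 3 (mod 5) is 8, and both constructions agree:
assert chinese_remainder_theorem(3, 2, 5, 3) == 8
assert chinese_remainder_theorem2(3, 2, 5, 3) == 8
assert invert_modulo(2, 5) == 3  # since 2 * 3 = 6 = 1 (mod 5)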
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22Img2ImgPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _a ( UpperCamelCase__ , unittest.TestCase ):
_lowercase : Union[str, Any] = KandinskyV22Img2ImgPipeline
_lowercase : Tuple = ['''image_embeds''', '''negative_image_embeds''', '''image''']
_lowercase : Any = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
_lowercase : Union[str, Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_lowercase : Optional[Any] = False
@property
def lowerCamelCase_ ( self: Union[str, Any] ) -> Dict:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return 32
@property
def lowerCamelCase_ ( self: Any ) -> Any:
"""simple docstring"""
return self.time_input_dim
@property
def lowerCamelCase_ ( self: Tuple ) -> Any:
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self: List[Any] ) -> Optional[Any]:
"""simple docstring"""
return 100
@property
def lowerCamelCase_ ( self: int ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
lowercase__ = UNet2DConditionModel(**UpperCamelCase_ )
return model
@property
def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCamelCase_ ( self: Optional[Any] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def lowerCamelCase_ ( self: Optional[int] ) -> Optional[int]:
"""simple docstring"""
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = {
'''num_train_timesteps''': 1_000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
lowercase__ = DDIMScheduler(**UpperCamelCase_ )
lowercase__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase_: Optional[Any] , UpperCamelCase_: Optional[int]=0 ) -> Optional[int]:
"""simple docstring"""
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowercase__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
UpperCamelCase_ )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uint8(UpperCamelCase_ ) ).convert('''RGB''' ).resize((256, 256) )
if str(UpperCamelCase_ ).startswith('''mps''' ):
lowercase__ = torch.manual_seed(UpperCamelCase_ )
else:
lowercase__ = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
lowercase__ = {
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowerCamelCase_ ( self: Optional[int] ) -> Dict:
"""simple docstring"""
lowercase__ = '''cpu'''
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**UpperCamelCase_ )
lowercase__ = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
def lowerCamelCase_ ( self: str ) -> List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self: List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase__ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_img2img_frog.npy''' )
lowercase__ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
lowercase__ = '''A red cartoon frog, 4k'''
lowercase__ = KandinskyV22PriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(UpperCamelCase_ )
lowercase__ = KandinskyV22Img2ImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
lowercase__ = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase__ = torch.Generator(device='''cpu''' ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
lowercase__ = pipeline(
image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='''np''' , )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 110 | 1 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config)
    rag_model.save_pretrained(dest_dir)
    # Sanity check.
    model_class.from_pretrained(dest_dir)
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
args = parser.parse_args()
dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 360 |
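A hedged Python-side sketch of calling the entry point above directly; the two base checkpoints are assumptions about available hub repos, and the output directory is a placeholder:
from pathlib import Path

dest = Path("./rag-consolidated")  # placeholder output directory
dest.mkdir(exist_ok=True)
consolidate(
    model_type="rag_sequence",
    generator_name_or_path="facebook/bart-large",
    question_encoder_name_or_path="facebook/dpr-question_encoder-single-nq-base",
    dest_dir=dest,
)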
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 42 | 0 |
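A quick check for the helper above; with this 1-indexed convention the sequence starts 1, 1, 2, 5, 14:
assert [catalan(n) for n in range(1, 6)] == [1, 1, 2, 5, 14]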
'''simple docstring'''
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class A ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
__magic_name__ = IFPipeline
__magic_name__ = TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
__magic_name__ = TEXT_TO_IMAGE_BATCH_PARAMS
__magic_name__ = PipelineTesterMixin.required_optional_params - {'''latents'''}
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
return self._get_dummy_components()
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE ).startswith('''mps''' ):
A : str = torch.manual_seed(SCREAMING_SNAKE_CASE )
else:
A : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(SCREAMING_SNAKE_CASE )
A : Any = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
self._test_save_load_local()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Optional[int] = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 )
A : str = IFSuperResolutionPipeline.from_pretrained(
'''DeepFloyd/IF-II-L-v1.0''' , variant='''fp16''' , torch_dtype=torch.float16 , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('''cuda''' )
A, A : Optional[Any] = pipe_a.encode_prompt('''anime turtle''' , device='''cuda''' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A : List[Any] = None
A : List[Any] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A : Optional[Any] = IFImg2ImgPipeline(**pipe_a.components )
A : Union[str, Any] = IFImg2ImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A : str = IFInpaintingPipeline(**pipe_a.components )
A : Any = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_start_torch_memory_measurement()
A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , )
A : Any = output.images[0]
assert image.shape == (64, 64, 3)
A : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
A : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
A : str = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : List[str] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
A : List[str] = output.images[0]
assert image.shape == (256, 256, 3)
A : Tuple = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_start_torch_memory_measurement()
A : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Any = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , )
A : List[str] = output.images[0]
assert image.shape == (64, 64, 3)
A : Any = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A : Union[str, Any] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
A : Any = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Union[str, Any] = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : str = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : Tuple = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
A : Any = output.images[0]
assert image.shape == (256, 256, 3)
A : Union[str, Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A : List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
_start_torch_memory_measurement()
A : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE )
A : Optional[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : List[Any] = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , num_inference_steps=2 , generator=SCREAMING_SNAKE_CASE , output_type='''np''' , )
A : Any = output.images[0]
assert image.shape == (64, 64, 3)
A : Optional[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
A : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# pipeline 2
_start_torch_memory_measurement()
A : int = torch.Generator(device='''cpu''' ).manual_seed(0 )
A : Union[str, Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : Any = floats_tensor((1, 3, 256, 256) , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
A : Optional[int] = floats_tensor((1, 3, 256, 256) , rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE )
A : Dict = pipe_a(
prompt_embeds=SCREAMING_SNAKE_CASE , negative_prompt_embeds=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , mask_image=SCREAMING_SNAKE_CASE , original_image=SCREAMING_SNAKE_CASE , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type='''np''' , )
A : Optional[int] = output.images[0]
assert image.shape == (256, 256, 3)
A : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
A : Any = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy''' )
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def lowerCAmelCase_ ( ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 3 |
'''simple docstring'''
from scipy.stats import pearsonr
import datasets
lowercase : Optional[int] = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
lowercase : Optional[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
lowercase : str = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }), reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"], )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 3 | 1 |
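The underlying scipy call, shown directly; the numbers match the docstring example above:
from scipy.stats import pearsonr

r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
print(round(r, 2))  # -0.74
print(round(p, 2))  # 0.15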
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
__UpperCamelCase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( DiffusionPipeline ):
    def __init__( self : Dict , UpperCamelCase__ : CLIPSegForImageSegmentation , UpperCamelCase__ : CLIPSegProcessor , UpperCamelCase__ : AutoencoderKL , UpperCamelCase__ : CLIPTextModel , UpperCamelCase__ : CLIPTokenizer , UpperCamelCase__ : UNet2DConditionModel , UpperCamelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase__ : StableDiffusionSafetyChecker , UpperCamelCase__ : CLIPImageProcessor , ):
'''simple docstring'''
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
SCREAMING_SNAKE_CASE : Any = (
f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : str = 1
SCREAMING_SNAKE_CASE : int = FrozenDict(UpperCamelCase__ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
SCREAMING_SNAKE_CASE : List[str] = (
f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = dict(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : int = FrozenDict(UpperCamelCase__ )
if safety_checker is None:
logger.warning(
f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )
def __A ( self : List[Any] , UpperCamelCase__ : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE : List[str] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
self.enable_attention_slicing(UpperCamelCase__ )
def __A ( self : List[str] ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
SCREAMING_SNAKE_CASE : List[str] = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __A ( self : Optional[int] ):
'''simple docstring'''
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    def __call__( self , prompt: Union[str, List[str]] , image: Union[torch.FloatTensor, PIL.Image.Image] , text: str , height: int = 512 , width: int = 512 , num_inference_steps: int = 50 , guidance_scale: float = 7.5 , negative_prompt: Optional[Union[str, List[str]]] = None , num_images_per_prompt: Optional[int] = 1 , eta: float = 0.0 , generator: Optional[torch.Generator] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps: int = 1 , **kwargs , ):
        '''simple docstring'''
        # use the segmentation model to turn the text query into a soft mask over the image
        inputs = self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        outputs = self.segmentation_model(**inputs )
        mask = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        mask_pil = self.numpy_to_pil(mask )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=prompt , image=image , mask_image=mask_pil , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , )
| 258 | from __future__ import annotations
import numpy as np
def relu(vector):
    # element-wise rectified linear unit: max(0, x) over the whole array
    return np.maximum(0 , vector )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 258 | 1 |
__lowerCAmelCase : Optional[int] ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__lowerCAmelCase : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__lowerCAmelCase : int ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 9 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """simple docstring"""
    parser = ArgumentParser(
        description=(
            '''PyTorch TPU distributed training launch '''
            '''helper utility that will spawn up '''
            '''multiple distributed processes'''
        ) )
    # Optional arguments for the launch helper
    parser.add_argument('''--num_cores''' , type=int , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
    # positional
    parser.add_argument(
        '''training_script''' , type=str , help=(
            '''The full path to the single TPU training '''
            '''program/script to be launched in parallel, '''
            '''followed by all the arguments for the '''
            '''training script'''
        ) , )
    # rest from the training program
    parser.add_argument('''training_script_args''' , nargs=REMAINDER )
    return parser.parse_args()
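# Example invocation (script names and arguments below are illustrative only):
#   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 3e-5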
def main():
    """simple docstring"""
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script )
    sys.path.append(str(script_fpath.parent.resolve() ) )
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name )
    # Patch sys.argv so the training script sees its own arguments plus the TPU core count
    sys.argv = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
    xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main() | 81 | 0 |
def find_minimum_change(denominations, value):
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first (greedy)
    for denomination in reversed(denominations):
        # Find denominations
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer
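# Note: the greedy strategy above is only guaranteed to be optimal for "canonical" coin
# systems such as the Indian denominations used below; arbitrary denominations would need
# dynamic programming. Quick sanity check:
#   find_minimum_change([1, 2, 5, 10], "18")  # -> [10, 5, 2, 1]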
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
    if (
        input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
        == "y"
    ):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
        for i in range(0, n):
            denominations.append(int(input(f'''Denomination {i}: ''').strip()))
        value = input("""Enter the change you want to make in Indian Currency: """).strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
        value = input("""Enter the change you want to make: """).strip()
    if int(value) == 0 or int(value) < 0:
        print("""The total value cannot be zero or negative.""")
    else:
        print(f'''Following is minimal change for {value}: ''')
        answer = find_minimum_change(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=""" """)
| 350 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ['''vqvae''']
    def __init__( self , vqvae: AutoencoderKL , unet: UNetaDConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
'''simple docstring'''
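        # DDIM produces usable samples in ~50 steps, while DDPM needs its full 1000-step schedule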
        return 5_0 if isinstance(self.scheduler , DDIMScheduler ) else 1_0_0_0
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
'''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
                (input_image.height, input_image.width) )
            # rescale the 8-bit spectrogram image to the [-1, 1] range the UNet was trained on
            input_image = (input_image / 2_5_5) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(torch.tensor(input_image[np.newaxis, np.newaxis, :] ) , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
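            # convert the mask boundaries from seconds to spectrogram pixel columns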
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
                model_output = self.unet(images , t , encoding )['sample']
            else:
                model_output = self.unet(images , t )['sample']
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['prev_sample']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )['sample']
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 2_5_5).round().astype('uint8' )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(lowercase_ )[:, np.newaxis, :] ) , **ImagePipelineOutput(lowercase_ ) )
@torch.no_grad()
    def encode( self , images , steps = 5_0 ) -> np.ndarray:
        '''simple docstring'''
        # run the DDIM update rule in reverse to map images back to the noise that generates them
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 2_5_5) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp( xa , xb , alpha ) -> torch.Tensor:
        '''simple docstring'''
        # spherical linear interpolation between two (flattened) tensors
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
| 14 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
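# Standard lazy-import layout: `_import_structure` maps submodules to their public names, and the
# real imports happen only under TYPE_CHECKING or on first attribute access via `_LazyModule`.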
lowerCAmelCase__ : List[Any] ={'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : int =['''DeiTFeatureExtractor''']
lowerCAmelCase__ : int =['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] =[
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str =[
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : str =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 257 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __lowercase ( ) -> List[str]:
__SCREAMING_SNAKE_CASE = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ).convert('RGB' )
return image
def __lowercase ( a__ ) -> Dict:
__SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def __lowercase ( a__ , a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = dct.pop(a__ )
__SCREAMING_SNAKE_CASE = val
def __lowercase ( a__ , a__ ) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
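        # BLIP-2's vision attention carries no key bias, so the fused qkv bias is [q_bias, zeros, v_bias]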
__SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(a__ , requires_grad=a__ ), v_bias) )
__SCREAMING_SNAKE_CASE = qkv_bias
def __lowercase ( a__ , a__ ) -> int:
__SCREAMING_SNAKE_CASE = 3_64 if 'coco' in model_name else 2_24
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=a__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=a__ ).to_dict()
elif "opt-6.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=a__ ).to_dict()
elif "t5-xl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
__SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=a__ , text_config=a__ )
return config, image_size
@torch.no_grad()
def __lowercase ( a__ , a__=None , a__=False ) -> Any:
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
    __SCREAMING_SNAKE_CASE = tokenizer('\n' , add_special_tokens=False ).input_ids[0]
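    # the id of "\n" doubles as the eos token id that gets baked into the generated config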
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(a__ , eos_token_id=a__ )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(a__ ).eval()
__SCREAMING_SNAKE_CASE = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
__SCREAMING_SNAKE_CASE = 'cuda' if torch.cuda.is_available() else 'cpu'
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=a__ , model_type=a__ , is_eval=a__ , device=a__ )
original_model.eval()
print('Done!' )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(a__ )
for src, dest in rename_keys:
rename_key(a__ , a__ , a__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(a__ )
if key.startswith('Qformer.bert' ):
__SCREAMING_SNAKE_CASE = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace('self' , 'attention' )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
__SCREAMING_SNAKE_CASE = key.replace('opt' , 'language' )
if key.startswith('t5' ):
__SCREAMING_SNAKE_CASE = key.replace('t5' , 'language' )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(a__ , strict=a__ )
assert len(a__ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors['eval'](a__ ).unsqueeze(0 ).to(a__ )
__SCREAMING_SNAKE_CASE = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(a__ )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=a__ , image_std=a__ )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=a__ , tokenizer=a__ )
__SCREAMING_SNAKE_CASE = processor(images=a__ , return_tensors='pt' ).pixel_values.to(a__ )
# make sure processor creates exact same pixel values
assert torch.allclose(a__ , a__ )
original_model.to(a__ )
hf_model.to(a__ )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
__SCREAMING_SNAKE_CASE = hf_model(a__ , a__ , labels=a__ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=a__ )
assert torch.allclose(logits[0, :3, :3] , a__ , atol=1E-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=a__ )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(a__ ) , a__ , atol=1E-2 )
print('Looks ok!' )
print('Generating a caption...' )
__SCREAMING_SNAKE_CASE = ''
__SCREAMING_SNAKE_CASE = tokenizer(a__ , return_tensors='pt' ).input_ids.to(a__ )
__SCREAMING_SNAKE_CASE = original_model.generate({'image': original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
a__ , a__ , do_sample=a__ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , a__ )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=a__ )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print('HF generation:' , a__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(a__ )
hf_model.save_pretrained(a__ )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
lowerCAmelCase__ : Dict =argparse.ArgumentParser()
lowerCAmelCase__ : Union[str, Any] =[
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
lowerCAmelCase__ : int =parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 257 | 1 |
'''simple docstring'''
def remove_digit(num: int) -> int:
    if not isinstance(num , int ):
        raise TypeError('''only integers accepted as input''' )
    else:
        num_str = str(abs(num ) )
        num_transpositions = [list(num_str ) for char in range(len(num_str ) )]
        for index in range(len(num_str ) ):
            num_transpositions[index].pop(index )
        return max(
            int(''.join(list(transposition ) ) ) for transposition in num_transpositions )
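# Example: remove_digit(152) -> 52, since dropping the leading digit leaves the largest number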
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 365 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : List[Any] = CanineTokenizer
a : Union[str, Any] = False
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
super().setUp()
__lowercase = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
return CanineTokenizer.from_pretrained('''google/canine-s''' )
def _UpperCAmelCase (self ,**_lowerCamelCase ) -> CanineTokenizer:
'''simple docstring'''
__lowercase = self.tokenizer_class.from_pretrained(self.tmpdirname ,**_lowerCamelCase )
__lowercase = 1024
return tokenizer
@require_torch
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
__lowercase = self.canine_tokenizer
__lowercase = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
# fmt: off
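        # CANINE tokenizes straight to Unicode code points: 57344/57345 are the private-use
        # CLS/SEP ids, the middle values are plain ord() codes of the characters, and 0 is padding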
__lowercase = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
# fmt: on
__lowercase = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''pt''' )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
__lowercase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertEqual((2, 39) ,batch.input_ids.shape )
self.assertEqual((2, 39) ,batch.attention_mask.shape )
@require_torch
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.canine_tokenizer
__lowercase = ['''Once there was a man.''', '''He wrote a test in HuggingFace Tranformers.''']
__lowercase = tokenizer(_lowerCamelCase ,padding=_lowerCamelCase ,return_tensors='''pt''' )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn('''input_ids''' ,_lowerCamelCase )
self.assertIn('''attention_mask''' ,_lowerCamelCase )
self.assertIn('''token_type_ids''' ,_lowerCamelCase )
@require_torch
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.canine_tokenizer
__lowercase = [
'''What\'s the weater?''',
'''It\'s about 25 degrees.''',
]
__lowercase = tokenizer(
text_target=_lowerCamelCase ,max_length=32 ,padding='''max_length''' ,truncation=_lowerCamelCase ,return_tensors='''pt''' )
self.assertEqual(32 ,targets['''input_ids'''].shape[1] )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
__lowercase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# Isolate this from the other tests because we save additional tokens/etc
__lowercase = tempfile.mkdtemp()
__lowercase = ''' He is very happy, UNwant\u00E9d,running'''
__lowercase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
__lowercase = chr(0xe_0_0_7 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
__lowercase = after_tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase ,_lowerCamelCase )
self.assertIn(_lowerCamelCase ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
__lowercase = tokenizer.__class__.from_pretrained(_lowerCamelCase ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_lowerCamelCase )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase , __lowercase = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_5
__lowercase = chr(_lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
__lowercase = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase ,input_encoded + special_token_id )
__lowercase = tokenizer.decode(_lowerCamelCase ,skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def _UpperCAmelCase (self ) -> str:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = chr(0xe_0_0_5 )
__lowercase = chr(0xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
__lowercase = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(len(_lowerCamelCase ) ,1 )
self.assertEqual(token_a[0] ,_lowerCamelCase )
self.assertEqual(token_a[0] ,_lowerCamelCase )
@require_tokenizers
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
__lowercase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,encoding='''utf-8''' ) as json_file:
__lowercase = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
__lowercase = [new_token_a]
__lowercase = [new_token_a]
with open(os.path.join(_lowerCamelCase ,'''special_tokens_map.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
with open(os.path.join(_lowerCamelCase ,'''tokenizer_config.json''' ) ,'''w''' ,encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase ,_lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__lowercase = tokenizer_class.from_pretrained(_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
__lowercase = 0xe_0_0_7
__lowercase = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__lowercase = [AddedToken(_lowerCamelCase ,lstrip=_lowerCamelCase )]
__lowercase = tokenizer_class.from_pretrained(
_lowerCamelCase ,additional_special_tokens=_lowerCamelCase ,extra_ids=0 )
self.assertIn(_lowerCamelCase ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = '''hello world'''
if self.space_between_special_tokens:
__lowercase = '''[CLS] hello world [SEP]'''
else:
__lowercase = input
__lowercase = tokenizer.encode(_lowerCamelCase ,add_special_tokens=_lowerCamelCase )
__lowercase = tokenizer.decode(_lowerCamelCase ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase ,[output, output.lower()] )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowercase = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
__lowercase = '''a'''
__lowercase = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,attr + '''_id''' ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,_lowerCamelCase ) ,_lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase ,attr + '''_id''' ) ,_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[] )
__lowercase = 0xe_0_0_6
__lowercase = chr(_lowerCamelCase )
setattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ,[additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens''' ) ,[additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase ,'''additional_special_tokens_ids''' ) ,[additional_special_token_id] )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> int:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
pass
| 217 | 0 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCamelCase : Tuple = logging.get_logger(__name__)
lowerCamelCase : Tuple = {
"facebook/detr-resnet-50": "https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json",
# See all DETR models at https://huggingface.co/models?filter=detr
}
class __lowerCAmelCase (lowercase__ ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = """detr"""
lowerCAmelCase__ : Any = ["""past_key_values"""]
lowerCAmelCase__ : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__(self : Tuple , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[str]=None , UpperCamelCase : Dict=3 , UpperCamelCase : Union[str, Any]=100 , UpperCamelCase : Optional[int]=6 , UpperCamelCase : Union[str, Any]=2048 , UpperCamelCase : Optional[int]=8 , UpperCamelCase : str=6 , UpperCamelCase : Optional[int]=2048 , UpperCamelCase : str=8 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Dict=True , UpperCamelCase : Optional[int]="relu" , UpperCamelCase : Any=256 , UpperCamelCase : str=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Tuple=1.0 , UpperCamelCase : Tuple=False , UpperCamelCase : Optional[Any]="sine" , UpperCamelCase : int="resnet50" , UpperCamelCase : str=True , UpperCamelCase : Any=False , UpperCamelCase : List[str]=1 , UpperCamelCase : List[Any]=5 , UpperCamelCase : List[Any]=2 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Dict=2 , UpperCamelCase : Any=0.1 , **UpperCamelCase : List[str] , ):
'''simple docstring'''
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase__ = CONFIG_MAPPING["resnet"](out_features=['''stage4'''] )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
lowercase__ = backbone_config.get('''model_type''' )
lowercase__ = CONFIG_MAPPING[backbone_model_type]
lowercase__ = config_class.from_dict(__lowerCamelCase )
# set timm attributes to None
lowercase__ = None, None, None
lowercase__ = use_timm_backbone
lowercase__ = backbone_config
lowercase__ = num_channels
lowercase__ = num_queries
lowercase__ = d_model
lowercase__ = encoder_ffn_dim
lowercase__ = encoder_layers
lowercase__ = encoder_attention_heads
lowercase__ = decoder_ffn_dim
lowercase__ = decoder_layers
lowercase__ = decoder_attention_heads
lowercase__ = dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = activation_function
lowercase__ = init_std
lowercase__ = init_xavier_std
lowercase__ = encoder_layerdrop
lowercase__ = decoder_layerdrop
lowercase__ = encoder_layers
lowercase__ = auxiliary_loss
lowercase__ = position_embedding_type
lowercase__ = backbone
lowercase__ = use_pretrained_backbone
lowercase__ = dilation
# Hungarian matcher
lowercase__ = class_cost
lowercase__ = bbox_cost
lowercase__ = giou_cost
# Loss coefficients
lowercase__ = mask_loss_coefficient
lowercase__ = dice_loss_coefficient
lowercase__ = bbox_loss_coefficient
lowercase__ = giou_loss_coefficient
lowercase__ = eos_coefficient
super().__init__(is_encoder_decoder=__lowerCamelCase , **__lowerCamelCase )
@property
def UpperCamelCase__ (self : str ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCamelCase__ (self : Optional[Any] ):
'''simple docstring'''
return self.d_model
@classmethod
def UpperCamelCase__ (cls : List[str] , UpperCamelCase : PretrainedConfig , **UpperCamelCase : List[Any] ):
'''simple docstring'''
return cls(backbone_config=__lowerCamelCase , **__lowerCamelCase )
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
lowercase__ = self.backbone_config.to_dict()
lowercase__ = self.__class__.model_type
return output
class __lowerCAmelCase (lowercase__ ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = version.parse("""1.11""" )
@property
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
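        # absolute tolerance used when validating exported ONNX outputs against the PyTorch model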
return 1E-5
@property
def UpperCamelCase__ (self : Dict ):
'''simple docstring'''
return 12
| 2 |
class _lowercase :
"""simple docstring"""
def __init__( self : List[Any] , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = size
lowerCamelCase__ : List[str] = [0] * size
lowerCamelCase__ : str = [0] * size
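    # Fenwick-tree-style layout (intent inferred from the bit tricks below): get_next/get_prev
    # hop between implicit intervals, giving logarithmic point updates and range-max queries.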
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return index | (index + 1)
@staticmethod
def lowerCAmelCase ( __lowerCamelCase : int ):
'''simple docstring'''
return (index & (index + 1)) - 1
def lowerCAmelCase ( self : int , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = value
while index < self.size:
lowerCamelCase__ : Tuple = self.get_prev(__lowerCamelCase ) + 1
if current_left_border == index:
lowerCamelCase__ : Optional[Any] = value
else:
lowerCamelCase__ : str = max(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
lowerCamelCase__ : Dict = self.get_next(__lowerCamelCase )
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : int ):
'''simple docstring'''
right -= 1 # Because of right is exclusive
lowerCamelCase__ : str = 0
while left <= right:
lowerCamelCase__ : Optional[Any] = self.get_prev(__lowerCamelCase )
if left <= current_left:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.tree[right] )
lowerCamelCase__ : Any = current_left
else:
lowerCamelCase__ : Optional[Any] = max(__lowerCamelCase , self.arr[right] )
right -= 1
return result
if __name__ == "__main__":
import doctest
doctest.testmod()
| 184 | 0 |
'''simple docstring'''
edges = {'a': ['c', 'b'], 'b': ['d', 'e'], 'c': [], 'd': [], 'e': []}
vertices = ['a', 'b', 'c', 'd', 'e']
def topological_sort(start, visited, sort):
    '''simple docstring'''
    current = start
    # add current to visited
    visited.append(current)
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor, visited, sort)
    # if all neighbors visited add current to sort
    sort.append(current)
    # if all vertices haven't been visited select a new one to visit
    if len(visited) != len(vertices):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice, visited, sort)
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort('a', [], [])
    print(sort)
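    # prints ['c', 'd', 'e', 'b', 'a'] for the graph above (children-first ordering)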
| 243 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
a__ : List[Any] = logging.get_logger(__name__)
a__ : str = {'vocab_file': 'vocab.txt'}
a__ : Any = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
a__ : Tuple = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
a__ : str = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class UpperCAmelCase__ ( UpperCAmelCase_):
__SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
__SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
__SCREAMING_SNAKE_CASE = PRETRAINED_INIT_CONFIGURATION
__SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__SCREAMING_SNAKE_CASE = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        # rebuild the backend normalizer when its saved options disagree with the arguments given here
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 243 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True , *args , **kwargs ):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
    disable = False
    if main_process_only:
        # render the bar only on local process 0; every other process gets a disabled bar
        disable = PartialState().local_process_index != 0
    return _tqdm(*args , **kwargs , disable=disable )
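# Typical use in an accelerate script (note the iterable comes after `main_process_only`):
#   for batch in tqdm(True, dataloader):
#       ...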
| 5 |
"""simple docstring"""
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[Any] = PegasusTokenizer
A_ : int = PegasusTokenizerFast
A_ : Optional[Any] = True
A_ : Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A = PegasusTokenizer(_lowerCamelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def _SCREAMING_SNAKE_CASE ( self : int, **_lowerCamelCase : List[Any] ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any], _lowerCamelCase : Dict ):
'''simple docstring'''
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = '''</s>'''
__A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCamelCase ), _lowerCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCamelCase ), _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '''<pad>''' )
self.assertEqual(vocab_keys[1], '''</s>''' )
self.assertEqual(vocab_keys[-1], '''v''' )
self.assertEqual(len(_lowerCamelCase ), 11_03 )
def _SCREAMING_SNAKE_CASE ( self : int ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 11_03 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__A = self.tokenizer_class.from_pretrained(self.tmpdirname )
__A = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
__A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
__A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__A = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
__A = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0]
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
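        # the low ids are reserved (pad=0, eos=1, then the <mask_*>/unk placeholders);
        # real SentencePiece pieces are shifted up by `offset`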
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
__A = '''To ensure a smooth flow of bank resolutions.'''
__A = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
__A = tokenizer([raw_input_str], return_tensors=_lowerCamelCase ).input_ids[0]
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Dict ):
'''simple docstring'''
__A = ['''This is going to be way too long.''' * 1_50, '''short example''']
__A = ['''not super long but more than 5 tokens''', '''tiny''']
__A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
__A = self._large_tokenizer(
text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask.
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
# fmt: off
__A = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCamelCase, model_name='''google/bigbird-pegasus-large-arxiv''', revision='''ba85d0851d708441f91440d509690f1ab6353415''', )
@require_sentencepiece
@require_tokenizers
class snake_case ( _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : str = PegasusTokenizer
A_ : Union[str, Any] = PegasusTokenizerFast
A_ : Any = True
A_ : str = True
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__A = PegasusTokenizer(_lowerCamelCase, offset=0, mask_token_sent=_lowerCamelCase, mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int], **_lowerCamelCase : Dict ):
'''simple docstring'''
return PegasusTokenizer.from_pretrained(self.tmpdirname, **_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str], _lowerCamelCase : List[str] ):
'''simple docstring'''
return ("This is a test", "This is a test")
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__A = self.tokenizer_class.from_pretrained(self.tmpdirname )
__A = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
__A = rust_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
__A = py_tokenizer([raw_input_str], return_tensors=_lowerCamelCase, add_special_tokens=_lowerCamelCase ).input_ids[0]
self.assertListEqual(_lowerCamelCase, _lowerCamelCase )
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = ['''This is going to be way too long.''' * 10_00, '''short example''']
__A = ['''not super long but more than 5 tokens''', '''tiny''']
__A = self._large_tokenizer(_lowerCamelCase, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
__A = self._large_tokenizer(
text_target=_lowerCamelCase, max_length=5, padding=_lowerCamelCase, truncation=_lowerCamelCase, return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_lowerCamelCase ) == 2 # input_ids, attention_mask.
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
__A = self._large_tokenizer(_lowerCamelCase ).input_ids
self.assertListEqual(
_lowerCamelCase, [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1], )
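# Illustrative sketch (not part of the test suite above): the slow/fast parity
# pattern the preceding test encodes, written with plain names. The sample
# sentence is an assumption; the checkpoint is the one this class already loads.
from transformers import PegasusTokenizer, PegasusTokenizerFast

slow_tok = PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")
fast_tok = PegasusTokenizerFast.from_pretrained("google/bigbird-pegasus-large-arxiv")
sample = "Both tokenizers should agree on these ids."
assert slow_tok(sample).input_ids == fast_tok(sample).input_ids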
| 266 | 0 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
lowerCAmelCase__ : Optional[Any] = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=False, _UpperCAmelCase=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase : List[Any] = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase : Dict = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
__UpperCAmelCase : Optional[int] = config_class.from_json_file(_UpperCAmelCase )
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : Any = True
print(F"Building TensorFlow model from configuration: {config}" )
__UpperCAmelCase : Tuple = model_class(_UpperCAmelCase )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase : int = cached_file(
_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCAmelCase : Any = load_pytorch_checkpoint_in_tfa_model(_UpperCAmelCase, _UpperCAmelCase )
if compare_with_pt_model:
__UpperCAmelCase : Tuple = tf_model(tf_model.dummy_inputs, training=_UpperCAmelCase ) # build the network
__UpperCAmelCase : Tuple = torch.load(_UpperCAmelCase, map_location="cpu" )
__UpperCAmelCase : int = pt_model_class.from_pretrained(
pretrained_model_name_or_path=_UpperCAmelCase, config=_UpperCAmelCase, state_dict=_UpperCAmelCase )
with torch.no_grad():
__UpperCAmelCase : str = pt_model(**pt_model.dummy_inputs )
__UpperCAmelCase : str = pto[0].numpy()
__UpperCAmelCase : int = tfo[0].numpy()
__UpperCAmelCase : int = np.amax(np.abs(np_pt - np_tf ) )
print(F"Max absolute difference between models outputs {diff}" )
assert diff <= 2E-2, F"Error, model absolute difference is >2e-2: {diff}"
# Save pytorch-model
print(F"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(_UpperCAmelCase, save_format="h5" )
def __UpperCamelCase ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase=None, _UpperCAmelCase=None, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, _UpperCAmelCase=False, ):
if args_model_type is None:
__UpperCAmelCase : Optional[int] = list(MODEL_CLASSES.keys() )
else:
__UpperCAmelCase : Optional[Any] = [args_model_type]
for j, model_type in enumerate(_UpperCAmelCase, start=1 ):
print("=" * 100 )
print(F" Converting model type {j}/{len(_UpperCAmelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase : int = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase : Dict = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase : List[str] = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(_UpperCAmelCase, _UpperCAmelCase ), start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
__UpperCAmelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(F" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
F" Converting checkpoint {i}/{len(_UpperCAmelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase : int = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
else:
__UpperCAmelCase : Tuple = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase : List[Any] = cached_file(_UpperCAmelCase, _UpperCAmelCase, force_download=not use_cached_models )
else:
__UpperCAmelCase : Optional[Any] = model_shortcut_name
if os.path.isfile(_UpperCAmelCase ):
__UpperCAmelCase : List[str] = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=_UpperCAmelCase, pytorch_checkpoint_path=_UpperCAmelCase, config_file=_UpperCAmelCase, tf_dump_path=os.path.join(_UpperCAmelCase, model_shortcut_name + "-tf_model.h5" ), compare_with_pt_model=_UpperCAmelCase, )
if remove_cached_files:
os.remove(_UpperCAmelCase )
os.remove(_UpperCAmelCase )
if __name__ == "__main__":
lowerCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
lowerCAmelCase__ : Dict = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
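# Illustrative sketch of the single conversion step the script above automates:
# instantiate a TF model from a config and pour PyTorch weights into it. The
# file names are placeholders, and the import path assumes the current
# transformers layout rather than this file's renamed helper.
from transformers import BertConfig, TFBertForPreTraining
from transformers.modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

config = BertConfig.from_json_file("config.json")  # placeholder local config
tf_model = TFBertForPreTraining(config)
tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, "pytorch_model.bin")
tf_model.save_weights("tf_model.h5", save_format="h5")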
| 353 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase__ : Any = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : Tuple = PegasusTokenizer(UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/pegasus-large" )
def lowerCamelCase_ ( self : List[Any] , **UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : int ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : List[str] = "</s>"
__UpperCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : int = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<pad>" )
self.assertEqual(vocab_keys[1] , "</s>" )
self.assertEqual(vocab_keys[-1] , "v" )
self.assertEqual(len(UpperCAmelCase_ ) , 1_103 )
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_103 )
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : int = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Tuple = (
"Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
" </s> <pad> <pad> <pad>"
)
__UpperCAmelCase : List[str] = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
__UpperCAmelCase : Any = self._large_tokenizer
# <mask_1> masks whole sentence while <mask_2> masks single word
__UpperCAmelCase : Tuple = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
__UpperCAmelCase : Optional[Any] = [2, 413, 615, 114, 3, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
__UpperCAmelCase : Dict = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 96_103
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 103
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 1_024
__UpperCAmelCase : Tuple = "To ensure a smooth flow of bank resolutions."
__UpperCAmelCase : str = [413, 615, 114, 2_291, 1_971, 113, 1_679, 10_710, 107, 1]
__UpperCAmelCase : Union[str, Any] = tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def lowerCamelCase_ ( self : str ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = ["This is going to be way too long." * 150, "short example"]
__UpperCAmelCase : Optional[int] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : str = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : Union[str, Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 1_024)
assert batch.attention_mask.shape == (2, 1_024)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
@slow
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
# fmt: off
__UpperCAmelCase : Tuple = {"input_ids": [[38_979, 143, 18_485, 606, 130, 26_669, 87_686, 121, 54_189, 1_129, 111, 26_669, 87_686, 121, 9_114, 14_787, 121, 13_249, 158, 592, 956, 121, 14_621, 31_576, 143, 62_613, 108, 9_688, 930, 43_430, 11_562, 62_613, 304, 108, 11_443, 897, 108, 9_314, 17_415, 63_399, 108, 11_443, 7_614, 18_316, 118, 4_284, 7_148, 12_430, 143, 1_400, 25_703, 158, 111, 4_284, 7_148, 11_772, 143, 21_297, 1_064, 158, 122, 204, 3_506, 1_754, 1_133, 14_787, 1_581, 115, 33_224, 4_482, 111, 1_355, 110, 29_173, 317, 50_833, 108, 20_147, 94_665, 111, 77_198, 107, 1], [110, 62_613, 117, 638, 112, 1_133, 121, 20_098, 1_355, 79_050, 13_872, 135, 1_596, 53_541, 1_352, 141, 13_039, 5_542, 124, 302, 518, 111, 268, 2_956, 115, 149, 4_427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1_235, 2_799, 18_289, 17_780, 204, 109, 9_474, 1_296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name="google/bigbird-pegasus-large-arxiv" , revision="ba85d0851d708441f91440d509690f1ab6353415" , )
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( snake_case__ ,unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE = PegasusTokenizer
SCREAMING_SNAKE_CASE = PegasusTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
__UpperCAmelCase : List[str] = PegasusTokenizer(UpperCAmelCase_ , offset=0 , mask_token_sent=UpperCAmelCase_ , mask_token="[MASK]" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv" )
def lowerCamelCase_ ( self : Union[str, Any] , **UpperCAmelCase_ : int ):
"""simple docstring"""
return PegasusTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_ )
def lowerCamelCase_ ( self : str , UpperCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
return ("This is a test", "This is a test")
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
__UpperCAmelCase : List[str] = (
"Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
" <pad> <pad> <pad>"
)
__UpperCAmelCase : str = rust_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
__UpperCAmelCase : int = py_tokenizer([raw_input_str] , return_tensors=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ ).input_ids[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_torch
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
__UpperCAmelCase : Any = ["This is going to be way too long." * 1_000, "short example"]
__UpperCAmelCase : List[Any] = ["not super long but more than 5 tokens", "tiny"]
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
__UpperCAmelCase : List[Any] = self._large_tokenizer(
text_target=UpperCAmelCase_ , max_length=5 , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors="pt" )
assert batch.input_ids.shape == (2, 4_096)
assert batch.attention_mask.shape == (2, 4_096)
assert targets["input_ids"].shape == (2, 5)
assert len(UpperCAmelCase_ ) == 2 # input_ids, attention_mask.
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
__UpperCAmelCase : List[Any] = (
"This is an example string that is used to test the original TF implementation against the HF"
" implementation"
)
__UpperCAmelCase : int = self._large_tokenizer(UpperCAmelCase_ ).input_ids
self.assertListEqual(
UpperCAmelCase_ , [182, 117, 142, 587, 4_211, 120, 117, 263, 112, 804, 109, 856, 25_016, 3_137, 464, 109, 26_955, 3_137, 1] , )
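# Illustrative sketch of the text/text_target batching pattern these tests
# exercise; the strings are assumptions and torch is required for "pt" tensors.
from transformers import PegasusTokenizer

tok = PegasusTokenizer.from_pretrained("google/pegasus-large")
batch = tok(["a long source document"], padding=True, truncation=True, return_tensors="pt")
labels = tok(text_target=["a short summary"], max_length=5, truncation=True, return_tensors="pt")
# batch.input_ids feeds the encoder; labels["input_ids"] supervise the decoder.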
| 37 | 0 |
"""simple docstring"""
from __future__ import annotations
from random import random
class A_ :
"""simple docstring"""
def __init__( self :List[str] , lowercase_ :int | None = None ) -> Optional[Any]:
UpperCAmelCase = value
UpperCAmelCase = random()
UpperCAmelCase = None
UpperCAmelCase = None
def __repr__( self :str ) -> Tuple:
from pprint import pformat
if self.left is None and self.right is None:
return f"""\'{self.value}: {self.prior:.5}\'"""
else:
return pformat(
{f"""{self.value}: {self.prior:.5}""": (self.left, self.right)} , indent=1 )
def __str__( self :str ) -> Union[str, Any]:
UpperCAmelCase = str(self.value ) + " "
UpperCAmelCase = str(self.left or '' )
UpperCAmelCase = str(self.right or '' )
return value + left + right
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
UpperCAmelCase = split(root.left , A_ )
return left, root
else:
UpperCAmelCase = split(root.right , A_ )
return root, right
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
UpperCAmelCase = merge(left.right , A_ )
return left
else:
UpperCAmelCase = merge(A_ , right.left )
return right
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = Node(A_ )
UpperCAmelCase = split(A_ , A_ )
return merge(merge(A_ , A_ ) , A_ )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
UpperCAmelCase = split(A_ , value - 1 )
UpperCAmelCase = split(A_ , A_ )
return merge(A_ , A_ )
def _lowerCAmelCase ( lowercase_ ):
if not root: # None
return
else:
inorder(root.left )
print(root.value , end=',' )
inorder(root.right )
def _lowerCAmelCase ( lowercase_ , lowercase_ ):
for arg in args.split():
if arg[0] == "+":
UpperCAmelCase = insert(A_ , int(arg[1:] ) )
elif arg[0] == "-":
UpperCAmelCase = erase(A_ , int(arg[1:] ) )
else:
print('Unknown command' )
return root
def _lowerCAmelCase ( ):
UpperCAmelCase = None
print(
'enter numbers to create a tree, + value to add value into treap, '
'- value to erase all nodes with value. \'q\' to quit. ' )
UpperCAmelCase = input()
while args != "q":
UpperCAmelCase = interact_treap(A_ , A_ )
print(A_ )
UpperCAmelCase = input()
print('good bye!' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
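# Illustrative restatement of the treap operations above with descriptive
# names (the originals are heavily renamed). It assumes a node class plus the
# split/merge semantics shown earlier: split(root, v) -> (keys <= v, keys > v),
# and merge joins two treaps by priority.
def treap_insert(root, value, node_cls, split, merge):
    left, right = split(root, value)                  # keys <= value | keys > value
    return merge(merge(left, node_cls(value)), right)

def treap_erase(root, value, split, merge):
    left, rest = split(root, value - 1)               # keep keys <= value - 1
    _, right = split(rest, value)                     # drop keys == value
    return merge(left, right)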
| 78 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : str = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
a : Union[str, Any] = 128
elif "12-12" in model_name:
a : List[Any] = 12
a : str = 12
elif "14-14" in model_name:
a : List[Any] = 14
a : Optional[int] = 14
elif "16-16" in model_name:
a : Any = 16
a : List[Any] = 16
else:
raise ValueError("Model not supported" )
a : Optional[int] = "huggingface/label-files"
if "speech-commands" in model_name:
a : Optional[int] = 35
a : List[str] = "speech-commands-v2-id2label.json"
else:
a : Optional[Any] = 527
a : Tuple = "audioset-id2label.json"
a : List[str] = json.load(open(hf_hub_download(A_ , A_ , repo_type="dataset" ) , "r" ) )
a : Union[str, Any] = {int(A_ ): v for k, v in idalabel.items()}
a : Any = idalabel
a : str = {v: k for k, v in idalabel.items()}
return config
def lowercase ( A_ )-> Tuple:
'''simple docstring'''
if "module.v" in name:
a : Union[str, Any] = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
a : List[Any] = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
a : Union[str, Any] = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
a : str = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
a : Union[str, Any] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
a : Union[str, Any] = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
a : str = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
a : Tuple = name.replace("attn" , "attention.self" )
if "norm1" in name:
a : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
a : Tuple = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
a : List[str] = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
a : Optional[int] = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def lowercase ( A_ , A_ )-> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
a : str = orig_state_dict.pop(A_ )
if "qkv" in key:
a : int = key.split("." )
a : Optional[int] = int(key_split[3] )
a : int = config.hidden_size
if "weight" in key:
a : List[str] = val[:dim, :]
a : Any = val[dim : dim * 2, :]
a : int = val[-dim:, :]
else:
a : Optional[Any] = val[:dim]
a : Union[str, Any] = val[dim : dim * 2]
a : str = val[-dim:]
else:
a : str = val
return orig_state_dict
def lowercase ( A_ )-> Dict:
'''simple docstring'''
a : Union[str, Any] = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(A_ , A_ )
@torch.no_grad()
def lowercase ( A_ , A_ , A_=False )-> Optional[int]:
'''simple docstring'''
a : Optional[int] = get_audio_spectrogram_transformer_config(A_ )
a : Dict = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
a : Any = model_name_to_url[model_name]
a : List[Any] = torch.hub.load_state_dict_from_url(A_ , map_location="cpu" )
# remove some keys
remove_keys(A_ )
# rename some keys
a : Union[str, Any] = convert_state_dict(A_ , A_ )
# load 🤗 model
a : List[str] = ASTForAudioClassification(A_ )
model.eval()
model.load_state_dict(A_ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
a : Tuple = -4.2_6_7_7_3_9_3 if "speech-commands" not in model_name else -6.8_4_5_9_7_8
a : Union[str, Any] = 4.5_6_8_9_9_7_4 if "speech-commands" not in model_name else 5.5_6_5_4_5_2_6
a : str = 1_024 if "speech-commands" not in model_name else 128
a : List[Any] = ASTFeatureExtractor(mean=A_ , std=A_ , max_length=A_ )
if "speech-commands" in model_name:
a : List[str] = load_dataset("speech_commands" , "v0.02" , split="validation" )
a : int = dataset[0]["audio"]["array"]
else:
a : Tuple = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
a , a : Tuple = torchaudio.load(A_ )
a : Optional[Any] = waveform.squeeze().numpy()
a : Union[str, Any] = feature_extractor(A_ , sampling_rate=16_000 , return_tensors="pt" )
# forward pass
a : Optional[Any] = model(**A_ )
a : List[str] = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
a : Any = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
a : Optional[int] = torch.tensor([-1.1_9_8_6, -7.0_9_0_3, -8.2_7_1_8] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
a : List[str] = torch.tensor([-2.6_1_2_8, -8.0_0_8_0, -9.4_3_4_4] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
a : Tuple = torch.tensor([-1.5_0_8_0, -7.4_5_3_4, -8.8_9_1_7] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
a : int = torch.tensor([-0.5_0_5_0, -6.5_8_3_3, -8.0_8_4_3] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
a : Any = torch.tensor([-0.3_8_2_6, -7.0_3_3_6, -8.2_4_1_3] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
a : Dict = torch.tensor([-1.2_1_1_3, -6.9_1_0_1, -8.3_4_7_0] )
elif model_name == "ast-finetuned-speech-commands-v2":
a : Union[str, Any] = torch.tensor([6.1_5_8_9, -8.0_5_6_6, -8.7_9_8_4] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , A_ , atol=1e-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(A_ ).mkdir(exist_ok=A_ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(A_ )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(A_ )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
__lowercase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
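# Illustrative sketch of running a converted checkpoint end to end; the hub id
# is the default model this script handles, and the random waveform stands in
# for real 16 kHz audio.
import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
waveform = np.random.randn(16_000).astype(np.float32)  # one second of fake audio
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[int(logits.argmax(-1))])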
| 40 | 0 |
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
_snake_case : str = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
def __snake_case ( self : Optional[int] , lowerCamelCase : List[Any] ) -> Optional[int]:
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : int = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Optional[int] ) -> Tuple:
if len(lowerCamelCase ) == 0 or len(lowerCamelCase ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase ) )
if isinstance(lowerCamelCase , lowerCamelCase ):
__snake_case : List[str] = [sequences]
__snake_case : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(_lowerCAmelCase )
class a (_lowerCAmelCase ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase : Union[str, Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase : Dict , **lowerCamelCase : List[str] ) -> int:
__snake_case : List[str] = args_parser
super().__init__(*lowerCamelCase , **lowerCamelCase )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def __snake_case ( self : str ) -> Tuple:
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def __snake_case ( self : List[Any] , lowerCamelCase : Any , lowerCamelCase : Dict=True , lowerCamelCase : Dict=True , lowerCamelCase : int=TruncationStrategy.ONLY_FIRST , **lowerCamelCase : Dict ) -> int:
__snake_case : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
__snake_case : Optional[Any] = self.tokenizer.eos_token
try:
__snake_case : Optional[int] = self.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , )
except Exception as e:
if "too short" in str(lowerCamelCase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__snake_case : List[str] = self.tokenizer(
lowerCamelCase , add_special_tokens=lowerCamelCase , return_tensors=lowerCamelCase , padding=lowerCamelCase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def __snake_case ( self : Union[str, Any] , **lowerCamelCase : Tuple ) -> Tuple:
if kwargs.get("multi_class" , lowerCamelCase ) is not None:
__snake_case : Tuple = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
__snake_case : int = {}
if "candidate_labels" in kwargs:
__snake_case : Union[str, Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
__snake_case : Tuple = kwargs["hypothesis_template"]
__snake_case : Any = {}
if "multi_label" in kwargs:
__snake_case : List[str] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any] , lowerCamelCase : Union[str, List[str]] , *lowerCamelCase : Dict , **lowerCamelCase : Union[str, Any] , ) -> Tuple:
if len(lowerCamelCase ) == 0:
pass
elif len(lowerCamelCase ) == 1 and "candidate_labels" not in kwargs:
__snake_case : Any = args[0]
else:
raise ValueError(F'Unable to understand extra arguments {args}' )
return super().__call__(lowerCamelCase , **lowerCamelCase )
def __snake_case ( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : Optional[int]=None , lowerCamelCase : Union[str, Any]="This example is {}." ) -> Tuple:
__snake_case , __snake_case : Optional[Any] = self._args_parser(lowerCamelCase , lowerCamelCase , lowerCamelCase )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase , lowerCamelCase ) ):
__snake_case : Optional[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase ) - 1,
**model_input,
}
def __snake_case ( self : Union[str, Any] , lowerCamelCase : Tuple ) -> List[str]:
__snake_case : str = inputs["candidate_label"]
__snake_case : List[Any] = inputs["sequence"]
__snake_case : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
__snake_case : Tuple = self.model(**lowerCamelCase )
__snake_case : int = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def __snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Dict=False ) -> Optional[int]:
__snake_case : Tuple = [outputs["candidate_label"] for outputs in model_outputs]
__snake_case : Optional[int] = [outputs["sequence"] for outputs in model_outputs]
__snake_case : List[str] = np.concatenate([output["logits"].numpy() for output in model_outputs] )
__snake_case : List[Any] = logits.shape[0]
__snake_case : Optional[int] = len(lowerCamelCase )
__snake_case : Optional[Any] = N // n
__snake_case : Optional[int] = logits.reshape((num_sequences, n, -1) )
if multi_label or len(lowerCamelCase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__snake_case : str = self.entailment_id
__snake_case : List[Any] = -1 if entailment_id == 0 else 0
__snake_case : Union[str, Any] = reshaped_outputs[..., [contradiction_id, entailment_id]]
__snake_case : Any = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase )
__snake_case : Optional[Any] = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__snake_case : Dict = reshaped_outputs[..., self.entailment_id]
__snake_case : Optional[Any] = np.exp(lowerCamelCase ) / np.exp(lowerCamelCase ).sum(-1 , keepdims=lowerCamelCase )
__snake_case : Dict = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
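# Illustrative usage sketch for the pipeline class above; the NLI checkpoint is
# an assumption (any model whose config maps a label starting with "entail"
# works, per the entailment_id property).
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "I just bought a new laptop.",
    candidate_labels=["technology", "cooking", "sports"],
    multi_label=False,
)
print(result["labels"][0], result["scores"][0])  # best label first, per the argsort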
| 134 |
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
if index == r:
for j in range(__lowerCamelCase ):
print(data[j] , end=" " )
print(" " )
return
# When no more elements are there to put in data[]
if i >= n:
return
# current is included, put next at next location
__snake_case : Union[str, Any] = arr[i]
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , index + 1 , __lowerCamelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ):
# A temporary array to store all combination one by one
__snake_case : Union[str, Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , 0 , __lowerCamelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_snake_case : List[str] = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
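# Cross-check: the standard library yields the same combinations as the
# recursive routine above, which makes a handy test oracle.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)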
| 134 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def lowerCAmelCase_ ( snake_case_ : Any , snake_case_ : bool = True , snake_case_ : float = math.inf , snake_case_ : float = -math.inf , snake_case_ : float = math.inf , snake_case_ : float = -math.inf , snake_case_ : bool = False , snake_case_ : float = 1_00 , snake_case_ : float = 0.01 , snake_case_ : float = 1 , ) -> Any:
'''simple docstring'''
UpperCAmelCase_ = False
UpperCAmelCase_ = search_prob
UpperCAmelCase_ = start_temperate
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = None
while not search_end:
UpperCAmelCase_ = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase_ = current_state
scores.append(snake_case_ )
iterations += 1
UpperCAmelCase_ = None
UpperCAmelCase_ = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase_ = random.randint(0 , len(snake_case_ ) - 1 ) # picking a random neighbor
UpperCAmelCase_ = neighbors.pop(snake_case_ )
UpperCAmelCase_ = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase_ = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase_ = picked_neighbor
else:
UpperCAmelCase_ = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase_ = picked_neighbor
UpperCAmelCase_ = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase_ = True
else:
UpperCAmelCase_ = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(snake_case_ ) , snake_case_ )
plt.xlabel("Iterations" )
plt.ylabel("Function values" )
plt.show()
return best_state
if __name__ == "__main__":
def lowerCAmelCase_ ( snake_case_ : Optional[Any] , snake_case_ : int ) -> int:
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
SCREAMING_SNAKE_CASE_: List[Any] =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE_: Optional[Any] =simulated_annealing(
prob, find_max=False, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
# starting the problem with initial coordinates (12, 47)
SCREAMING_SNAKE_CASE_: Tuple =SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE_: List[str] =simulated_annealing(
prob, find_max=True, max_x=1_00, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
f"and 50 > y > - 5 found via hill climbing: {local_min.score()}"
)
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
return (3 * x**2) - (6 * y)
SCREAMING_SNAKE_CASE_: str =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE_: Tuple =simulated_annealing(prob, find_max=False, visualization=True)
print(
'The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f"{local_min.score()}"
)
SCREAMING_SNAKE_CASE_: List[str] =SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
SCREAMING_SNAKE_CASE_: Tuple =simulated_annealing(prob, find_max=True, visualization=True)
print(
'The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '
f"{local_min.score()}"
)
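# The acceptance rule above, isolated for clarity: a non-improving neighbor is
# still taken with probability e**(change / T), so exploration fades as the
# temperature decays toward the threshold.
import math
import random

def accepts(change: float, temperature: float) -> bool:
    return change > 0 or random.random() < math.e ** (change / temperature)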
| 1 | '''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
SCREAMING_SNAKE_CASE_: Optional[int] =Lock()
def lowerCAmelCase_ ( snake_case_ : Dict , snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : Any , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(snake_case_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCAmelCase_ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCAmelCase_ = min(snake_case_ , snake_case_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(snake_case_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCAmelCase_ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCAmelCase_ = max(snake_case_ , snake_case_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Optional[Any] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
for i in range(1 , len(snake_case_ ) - 1 ):
UpperCAmelCase_ = Pipe()
UpperCAmelCase_ = Pipe()
process_array_.append(
Process(
target=snake_case_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCAmelCase_ = temp_rs
UpperCAmelCase_ = temp_rr
process_array_.append(
Process(
target=snake_case_ , args=(
len(snake_case_ ) - 1,
arr[len(snake_case_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(snake_case_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(snake_case_ ) ):
UpperCAmelCase_ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def lowerCAmelCase_ ( ) -> str:
'''simple docstring'''
UpperCAmelCase_ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*snake_case_ )
UpperCAmelCase_ = odd_even_transposition(snake_case_ )
print("Sorted List\n" )
print(*snake_case_ )
if __name__ == "__main__":
main()
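# Single-process reference version of odd-even transposition sort: the same n
# rounds of alternating neighbor compare-and-swap, without pipes or locks.
def odd_even_transposition_serial(arr: list) -> list:
    for phase in range(len(arr)):
        start = phase % 2  # even phases pair (0,1),(2,3),...; odd phases (1,2),(3,4),...
        for i in range(start, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition_serial(list(range(10, 0, -1))) == list(range(1, 11))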
| 1 | 1 |
def is_ip_va_address_valid( lowerCAmelCase__ : str ):
    __a : int = [int(i ) for i in lowerCAmelCase__.split('''.''' ) if i.isdigit()]
    # every octet must be in 0-255; 255 itself is legal (e.g. 255.255.255.255)
    return len(__a ) == 4 and all(0 <= octet <= 2_5_5 for octet in __a )
if __name__ == "__main__":
lowercase__ =input().strip()
lowercase__ ='valid' if is_ip_va_address_valid(ip) else 'invalid'
print(F"""{ip} is a {valid_or_invalid} IP v4 address.""")
| 90 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowercase__ =logging.get_logger(__name__)
lowercase__ ={'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
lowercase__ ={
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
lowercase__ ={'allegro/herbert-base-cased': 514}
lowercase__ ={}
class UpperCamelCase__ ( __lowercase ):
_SCREAMING_SNAKE_CASE : str = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION
_SCREAMING_SNAKE_CASE : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE : Optional[Any] = HerbertTokenizer
def __init__(self : str , snake_case_ : Optional[Any]=None , snake_case_ : Tuple=None , snake_case_ : Dict=None , snake_case_ : List[str]="<s>" , snake_case_ : Any="<unk>" , snake_case_ : Optional[int]="<pad>" , snake_case_ : Optional[Any]="<mask>" , snake_case_ : List[Any]="</s>" , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , cls_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , sep_token=snake_case_ , **snake_case_ , )
def lowerCAmelCase (self : int , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : int = [self.cls_token_id]
__a : Union[str, Any] = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase (self : Optional[Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None , snake_case_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1] + ([0] * len(snake_case_ )) + [1]
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[int] , snake_case_ : Optional[List[int]] = None ):
__a : Optional[int] = [self.sep_token_id]
__a : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase (self : int , snake_case_ : str , snake_case_ : Optional[str] = None ):
__a : Any = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
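# Illustrative sketch of the special-token layout the methods above produce:
# single sequences become <s> ... </s>, pairs <s> A </s> B </s>. The sample
# sentence is an assumption.
from transformers import HerbertTokenizerFast

herbert = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
ids = herbert("Kraków jest piękny.").input_ids
print(herbert.convert_ids_to_tokens(ids))  # begins with <s> and ends with </s>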
| 90 | 1 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase : str = logging.get_logger(__name__)
UpperCAmelCase : Any = '''▁'''
UpperCAmelCase : Union[str, Any] = {'''vocab_file''': '''vocab.txt''', '''sentencepiece_model_ckpt''': '''sentencepiece.bpe.model'''}
UpperCAmelCase : Tuple = {
'''sentencepiece_model_file''': '''sentencepiece.bpe.model''',
'''vocab_file''': '''vocab.txt''',
}
UpperCAmelCase : int = {
'''vocab_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt''',
},
'''sentencepiece_model_file''': {
'''ernie-m-base''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
'''ernie-m-large''': '''https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model''',
},
}
UpperCAmelCase : List[Any] = {
'''ernie-m-base''': 5_14,
'''ernie-m-large''': 5_14,
}
UpperCAmelCase : List[str] = {
'''ernie-m-base''': {'''do_lower_case''': False},
'''ernie-m-large''': {'''do_lower_case''': False},
}
class _A( snake_case__ ):
"""simple docstring"""
UpperCamelCase : List[str] = ["input_ids"]
UpperCamelCase : str = VOCAB_FILES_NAMES
UpperCamelCase : Optional[int] = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : int = RESOURCE_FILES_NAMES
def __init__( self , _A , _A=None , _A=False , _A="utf8" , _A="[UNK]" , _A="[SEP]" , _A="[PAD]" , _A="[CLS]" , _A="[MASK]" , _A = None , **_A , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__A : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , vocab_file=_A , encoding=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__A : int = do_lower_case
__A : Any = sentencepiece_model_ckpt
__A : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_A )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
__A : Any = self.load_vocab(filepath=_A )
else:
__A : Optional[Any] = {self.sp_model.id_to_piece(_A ): id for id in range(self.sp_model.get_piece_size() )}
__A : Tuple = {v: k for k, v in self.vocab.items()}
def get_offset_mapping( self , text ):
    if text is None:
        return None
    split_tokens = self.tokenize(text )
    normalized_text , char_mapping = '', []
    for i, ch in enumerate(text ):
        if ch in self.SP_CHAR_MAPPING:
            ch = self.SP_CHAR_MAPPING.get(ch )
        else:
            ch = unicodedata.normalize('NFKC' , ch )
            if self.is_whitespace(ch ):
                continue
        normalized_text += ch
        char_mapping.extend([i] * len(ch ) )
    text , token_mapping , offset = normalized_text, [], 0
    if self.do_lower_case:
        text = text.lower()
    for token in split_tokens:
        if token[:1] == "▁":
            token = token[1:]
        start = text[offset:].index(token ) + offset
        end = start + len(token )
        token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
        offset = end
    return token_mapping
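# Sketch (toy input, an illustrative assumption): for text "ab cd" tokenized as
# ["▁ab", "▁cd"], the method returns [(0, 2), (3, 5)], i.e. the character span of
# each token in the original string, with the whitespace skipped by normalization.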
@property
def vocab_size( self ):
    return len(self.vocab )
def get_vocab( self ):
    return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self ):
    state = self.__dict__.copy()
    state["sp_model"] = None
    return state
def __setstate__( self , d ):
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , 'sp_model_kwargs' ):
        self.sp_model_kwargs = {}
    self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
    self.sp_model.Load(self.sentencepiece_model_ckpt )
def clean_text( self , text ):
    return "".join((self.SP_CHAR_MAPPING.get(c , c ) for c in text) )
def _tokenize( self , text , enable_sampling=False , nbest_size=64 , alpha=0.1 ):
    if self.sp_model_kwargs.get('enable_sampling' ) is True:
        enable_sampling = True
    if self.sp_model_kwargs.get('alpha' ) is not None:
        alpha = self.sp_model_kwargs.get('alpha' )
    if self.sp_model_kwargs.get('nbest_size' ) is not None:
        nbest_size = self.sp_model_kwargs.get('nbest_size' )
    if not enable_sampling:
        pieces = self.sp_model.EncodeAsPieces(text )
    else:
        pieces = self.sp_model.SampleEncodeAsPieces(text , nbest_size , alpha )
    new_pieces = []
    for pi, piece in enumerate(pieces ):
        if piece == SPIECE_UNDERLINE:
            if not pieces[pi + 1].startswith(SPIECE_UNDERLINE ) and pi != 0:
                new_pieces.append(SPIECE_UNDERLINE )
                continue
            else:
                continue
        lst_i = 0
        for i, chunk in enumerate(piece ):
            if chunk == SPIECE_UNDERLINE:
                continue
            if self.is_ch_char(chunk ) or self.is_punct(chunk ):
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                new_pieces.append(chunk )
                lst_i = i + 1
            elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
            elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                    new_pieces.append(piece[lst_i:i] )
                lst_i = i
        if len(piece ) > lst_i:
            new_pieces.append(piece[lst_i:] )
    return new_pieces
def convert_tokens_to_string( self , tokens ):
    out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def convert_ids_to_string( self , ids ):
    tokens = self.convert_ids_to_tokens(ids )
    out_string = ''.join(tokens ).replace(SPIECE_UNDERLINE , ' ' ).strip()
    return out_string
def _convert_token_to_id( self , token ):
    return self.vocab.get(token , self.vocab.get(self.unk_token ) )
def _convert_id_to_token( self , index ):
    return self.reverse_vocab.get(index , self.unk_token )
def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
    if token_ids_b is None:
        return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
    _cls = [self.cls_token_id]
    _sep = [self.sep_token_id]
    return _cls + token_ids_a + _sep + _sep + token_ids_b + _sep
def build_offset_mapping_with_special_tokens( self , offset_mapping_a , offset_mapping_b=None ):
    if offset_mapping_b is None:
        return [(0, 0)] + offset_mapping_a + [(0, 0)]
    return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_b + [(0, 0)]
def get_special_tokens_mask( self , token_ids_a , token_ids_b=None , already_has_special_tokens=False ):
    if already_has_special_tokens:
        if token_ids_b is not None:
            raise ValueError(
                'You should not supply a second sequence if the provided sequence of '
                'ids is already formatted with special tokens for the model.' )
        return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
    if token_ids_b is not None:
        return [1] + ([0] * len(token_ids_a )) + [1, 1] + ([0] * len(token_ids_b )) + [1]
    return [1] + ([0] * len(token_ids_a )) + [1]
def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
    # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
    if token_ids_b is None:
        # [CLS] X [SEP]
        return (len(token_ids_a ) + 2) * [0]
    # [CLS] A [SEP] [SEP] B [SEP]
    return [0] * (len(token_ids_a ) + 1) + [1] * (len(token_ids_b ) + 3)
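# Worked example (hypothetical lengths): for len(token_ids_a) == 2 and
# len(token_ids_b) == 3 this yields [0, 0, 0, 1, 1, 1, 1, 1, 1], matching the
# nine positions of "[CLS] A A [SEP] [SEP] B B B [SEP]".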
def is_ch_char( self , char ):
    if "\u4e00" <= char <= "\u9fff":
        return True
    return False
def is_alpha( self , char ):
    if ("a" <= char <= "z") or ("A" <= char <= "Z"):
        return True
    return False
def is_punct( self , char ):
    if char in ",;:.?!~,;:。?!《》【】":
        return True
    return False
def is_whitespace( self , char ):
    if char == " " or char == "\t" or char == "\n" or char == "\r":
        return True
    if len(char ) == 1:
        cat = unicodedata.category(char )
        if cat == "Zs":
            return True
    return False
def load_vocab( self , filepath ):
    token_to_idx = {}
    with io.open(filepath , 'r' , encoding='utf-8' ) as f:
        for index, line in enumerate(f ):
            token = line.rstrip('\n' )
            token_to_idx[token] = int(index )
    return token_to_idx
def save_vocabulary( self , save_directory , filename_prefix = None ):
    index = 0
    if os.path.isdir(save_directory ):
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
    else:
        vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
    with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
        for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
            if index != token_index:
                logger.warning(
                    F"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                    ' Please check that the vocabulary is not corrupted!' )
                index = token_index
            writer.write(token + '\n' )
            index += 1
    tokenizer_model_file = os.path.join(save_directory , 'sentencepiece.bpe.model' )
    with open(tokenizer_model_file , 'wb' ) as fi:
        content_spiece_model = self.sp_model.serialized_model_proto()
        fi.write(content_spiece_model )
    return (vocab_file,)
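# Round-trip sketch (the file name and toy vocab are assumptions for illustration):
# if self.vocab == {"hello": 0, "world": 1}, save_vocabulary("/tmp") writes
# /tmp/vocab.txt with one token per line, and load_vocab("/tmp/vocab.txt")
# rebuilds the same {token: index} mapping.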
| 280 |
def bubble_sort( list_data: list , length: int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
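# Illustrative call (a minimal sketch; the sample list is an assumption):
# >>> bubble_sort([5, 1, 4, 2, 8])
# [1, 2, 4, 5, 8]
# The recursion stops as soon as one full pass completes without a swap.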
| 280 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['feature_extraction_owlvit'] = ['OwlViTFeatureExtractor']
_import_structure['image_processing_owlvit'] = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_owlvit'] = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
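# How the lazy pattern above behaves (an explanatory sketch, not part of the file):
# importing the package only registers _import_structure; a heavy submodule such as
# modeling_owlvit is loaded on first attribute access, e.g.
#   from transformers.models.owlvit import OwlViTModel  # triggers the real import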
| 81 |
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2 , max_number , i):
                is_prime[j] = False
    return [i for i in range(2 , max_number) if is_prime[i]]
def solution(max_number: int = 10**8) -> int:
    prime_numbers = calculate_prime_numbers(max_number // 2)
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
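# Two-pointer sketch (toy bound, for illustration): with max_number = 30 the primes
# below 15 are [2, 3, 5, 7, 11, 13]; for left = 0 (p = 2) the inner loop leaves
# right at 13 (since 2 * 13 = 26 < 30), so all six products 2 * q are counted in
# one step instead of being tested individually.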
if __name__ == "__main__":
print(F'''{solution() = }''')
| 81 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix( matrix: list[list[float]] ) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.' )
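# Illustrative call (the sample matrix is an assumption for demonstration):
# inverse_of_matrix([[2.0, 5.0], [2.0, 0.0]]) has determinant -10 and returns
# [[0.0, 0.5], [0.2, -0.2]], i.e. [[0, -5], [-2, 2]] divided entrywise by -10.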
| 181 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''tokenization_distilbert_fast'''] = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_distilbert'''] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_tf_distilbert'''] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_flax_distilbert'''] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTaTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = GPTaTokenizer
rust_tokenizer_class = GPTaTokenizerFast
test_rust_tokenizer = True
from_pretrained_kwargs = {'''add_prefix_space''': True}
test_seq2seq = False
def SCREAMING_SNAKE_CASE ( self :List[Any] ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
self.special_tokens_map = {"unk_token": "<unk>"}
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
    fp.write(json.dumps(vocab_tokens ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
    fp.write("\n".join(merges ) )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , **snake_case :int ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , **snake_case :Tuple ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **snake_case )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :int ):
'''simple docstring'''
A_ : Optional[Any] = "lower newer"
A_ : Any = "lower newer"
return input_text, output_text
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
tokenizer = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
sequence = "lower newer"
bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
tokens = tokenizer.tokenize(sequence , add_prefix_space=True )
self.assertListEqual(tokens , bpe_tokens )
input_tokens = tokens + [tokenizer.unk_token]
input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
A_ : str = self.get_tokenizer()
A_ : str = self.get_rust_tokenizer(add_prefix_space=snake_case )
A_ : Tuple = "lower newer"
# Testing tokenization
A_ : Optional[Any] = tokenizer.tokenize(snake_case , add_prefix_space=snake_case )
A_ : List[str] = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids without special tokens
A_ : List[str] = tokenizer.encode(snake_case , add_special_tokens=snake_case , add_prefix_space=snake_case )
A_ : Optional[Any] = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing conversion to ids with special tokens
A_ : str = self.get_rust_tokenizer(add_prefix_space=snake_case )
A_ : List[Any] = tokenizer.encode(snake_case , add_prefix_space=snake_case )
A_ : Any = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
# Testing the unknown token
A_ : List[str] = tokens + [rust_tokenizer.unk_token]
A_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case ) , snake_case )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] , *snake_case :Optional[Any] , **snake_case :str ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Dict , snake_case :int=15 ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
A_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case )
# Simple input
A_ : str = "This is a simple input"
A_ : List[Any] = ["This is a simple input 1", "This is a simple input 2"]
A_ : Tuple = ("This is a simple input", "This is a pair")
A_ : Any = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding="max_length" )
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding="max_length" )
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding="max_length" , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding="max_length" )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding="max_length" )
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding="max_length" , )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
A_ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
A_ : Union[str, Any] = "This is a simple input"
A_ : Optional[int] = ["This is a simple input looooooooong", "This is a simple input"]
A_ : Optional[Any] = ("This is a simple input", "This is a pair")
A_ : Dict = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
A_ : int = tokenizer.pad_token_id
A_ : List[str] = tokenizer(snake_case , padding="max_length" , max_length=30 , return_tensors="np" )
A_ : int = tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors="np" )
A_ : Any = tokenizer(*snake_case , padding="max_length" , max_length=60 , return_tensors="np" )
A_ : List[Any] = tokenizer(snake_case , padding=snake_case , truncate=snake_case , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
A_ : Optional[Any] = "$$$"
A_ : Optional[int] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case , add_bos_token=snake_case )
A_ : List[str] = "This is a simple input"
A_ : str = ["This is a simple input 1", "This is a simple input 2"]
A_ : Optional[Any] = tokenizer.bos_token_id
A_ : List[Any] = tokenizer(snake_case )
A_ : List[Any] = tokenizer(snake_case )
self.assertEqual(out_s.input_ids[0] , snake_case )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
A_ : List[Any] = tokenizer.decode(out_s.input_ids )
A_ : Any = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , snake_case )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[str] = [self.get_tokenizer(do_lower_case=snake_case , add_bos_token=snake_case )]
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
A_ : str = "Encode this."
A_ : Optional[Any] = "This one too please."
A_ : int = tokenizer.encode(snake_case , add_special_tokens=snake_case )
encoded_sequence += tokenizer.encode(snake_case , add_special_tokens=snake_case )
A_ : Union[str, Any] = tokenizer.encode_plus(
snake_case , snake_case , add_special_tokens=snake_case , return_special_tokens_mask=snake_case , )
A_ : Union[str, Any] = encoded_sequence_dict["input_ids"]
A_ : Optional[int] = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(snake_case ) , len(snake_case ) )
A_ : List[Any] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(snake_case )
]
A_ : int = [x for x in filtered_sequence if x is not None]
self.assertEqual(snake_case , snake_case )
@require_tokenizers
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Tuple ):
'''simple docstring'''
A_ : List[Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=snake_case )
A_ : Union[str, Any] = "A photo of a cat"
A_ : Dict = tokenizer.encode(
snake_case , )
self.assertEqual(snake_case , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("./test_opt" )
A_ : str = tokenizer.encode(
snake_case , )
self.assertEqual(snake_case , [2, 250, 1_345, 9, 10, 4_758] )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Union[str, Any] = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=snake_case )
A_ : Optional[Any] = "A photo of a cat"
A_ : Union[str, Any] = tokenizer.encode(
snake_case , )
# Same as above
self.assertEqual(snake_case , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def SCREAMING_SNAKE_CASE ( self :int ):
'''simple docstring'''
A_ : Optional[int] = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=snake_case )
A_ : Any = "bos"
A_ : Tuple = tokenizer.get_vocab()["bos"]
A_ : Optional[int] = "A photo of a cat"
A_ : Any = tokenizer.encode(
snake_case , )
# We changed the bos token
self.assertEqual(snake_case , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
A_ : Tuple = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
A_ : Optional[Any] = tokenizer.encode(
snake_case , )
self.assertEqual(snake_case , [31_957, 250, 1_345, 9, 10, 4_758] )
| 70 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clap''': [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapAudioConfig''',
'''ClapConfig''',
'''ClapTextConfig''',
],
'''processing_clap''': ['''ClapProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['''modeling_clap'''] = [
'''CLAP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ClapModel''',
'''ClapPreTrainedModel''',
'''ClapTextModel''',
'''ClapTextModelWithProjection''',
'''ClapAudioModel''',
'''ClapAudioModelWithProjection''',
]
_import_structure['''feature_extraction_clap'''] = ['''ClapFeatureExtractor''']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 70 | 1 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor( SequenceFeatureExtractor ):
"""simple docstring"""
model_input_names = ["""input_features"""]
def __init__( self , feature_size=80 , sampling_rate=16000 , hop_length=160 , chunk_length=30 , n_fft=400 , padding_value=0.0 , return_attention_mask=False , **kwargs , ):
    super().__init__(
        feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , return_attention_mask=return_attention_mask , **kwargs , )
    self.n_fft = n_fft
    self.hop_length = hop_length
    self.chunk_length = chunk_length
    self.n_samples = chunk_length * sampling_rate
    self.nb_max_frames = self.n_samples // hop_length
    self.sampling_rate = sampling_rate
    self.mel_filters = mel_filter_bank(
        num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=sampling_rate , norm="slaney" , mel_scale="slaney" , )
def _np_extract_fbank_features( self , waveform: np.array):
    log_spec = spectrogram(
        waveform , window_function(self.n_fft , "hann") , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , )
    log_spec = log_spec[:, :-1]
    log_spec = np.maximum(log_spec , log_spec.max() - 8.0)
    log_spec = (log_spec + 4.0) / 4.0
    return log_spec
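# Shape sketch (values assumed from the defaults above): a 30 s chunk at 16 kHz is
# 480000 samples; with n_fft=400 and hop_length=160 the spectrogram yields 3001
# frames, the last one is dropped, and the result is an (80, 3000) log-mel array
# clamped to max - 8.0 and rescaled to roughly [-1, 1].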
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def zero_mean_unit_var_norm( input_values: List[np.ndarray] , attention_mask: List[np.ndarray] , padding_value: float = 0.0):
    if attention_mask is not None:
        attention_mask = np.array(attention_mask , np.int32)
        normed_input_values = []
        for vector, length in zip(input_values , attention_mask.sum(-1)):
            normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
            if length < normed_slice.shape[0]:
                normed_slice[length:] = padding_value
            normed_input_values.append(normed_slice)
    else:
        normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
    return normed_input_values
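# Toy numbers (illustration only): a fully attended vector [1.0, 3.0] has mean 2.0
# and variance 1.0, so it normalizes to approximately [-1.0, 1.0].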
def __call__( self , raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , truncation: bool = True , pad_to_multiple_of: Optional[int] = None , return_tensors: Optional[Union[str, TensorType]] = None , return_attention_mask: Optional[bool] = None , padding: Optional[str] = "max_length" , max_length: Optional[int] = None , sampling_rate: Optional[int] = None , do_normalize: Optional[bool] = None , **kwargs , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''')
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
    is_batched_numpy = isinstance(raw_speech , np.ndarray) and len(raw_speech.shape) > 1
    if is_batched_numpy and len(raw_speech.shape) > 2:
        raise ValueError(f'''Only mono-channel audio is supported for input to {self}''')
    is_batched = is_batched_numpy or (
        isinstance(raw_speech , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
    )
    if is_batched:
        raw_speech = [np.asarray([speech] , dtype=np.float32).T for speech in raw_speech]
    elif not is_batched and not isinstance(raw_speech , np.ndarray):
        raw_speech = np.asarray(raw_speech , dtype=np.float32)
    elif isinstance(raw_speech , np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
        raw_speech = raw_speech.astype(np.float32)
    # always return batch
    if not is_batched:
        raw_speech = [np.asarray([raw_speech]).T]
    batched_speech = BatchFeature({"input_features": raw_speech})
    # convert into correct format for padding
    padded_inputs = self.pad(
        batched_speech , padding=padding , max_length=max_length if max_length else self.n_samples , truncation=truncation , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask or do_normalize , )
    # zero-mean and unit-variance normalization
    if do_normalize:
        padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
            padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , )
        padded_inputs["input_features"] = np.stack(padded_inputs["input_features"] , axis=0)
    # make sure list is in array format
    input_features = padded_inputs.get("input_features").transpose(2 , 0 , 1)
    input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
    if isinstance(input_features[0] , List):
        padded_inputs["input_features"] = [np.asarray(feature , dtype=np.float32) for feature in input_features]
    else:
        padded_inputs["input_features"] = input_features
    if return_attention_mask:
        # rescale from sample (48000) to feature (3000)
        padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
    if return_tensors is not None:
        padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
    return padded_inputs
def to_dict( self ):
    output = copy.deepcopy(self.__dict__)
    output["feature_extractor_type"] = self.__class__.__name__
    if "mel_filters" in output:
        del output["mel_filters"]
    return output
| 40 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy( preds , labels ):
    """simple docstring"""
    return float((preds == labels).mean() )
def acc_and_f1( preds , labels , f1_avg="binary" ):
    """simple docstring"""
    acc = simple_accuracy(preds , labels )
    f1 = float(f1_score(y_true=labels , y_pred=preds , average=f1_avg ) )
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc( ids_preds , labels ):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds , labels ):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["""prediction"""]
        if question_id in question_map:
            question_map[question_id].append((pred, label) )
        else:
            question_map[question_id] = [(pred, label)]
    f1s , ems = [], []
    for question, preds_labels in question_map.items():
        question_preds , question_labels = zip(*preds_labels )
        f1 = f1_score(y_true=question_labels , y_pred=question_preds , average="""macro""" )
        f1s.append(f1 )
        em = int(sum(pred == label for pred, label in preds_labels ) == len(preds_labels ) )
        ems.append(em )
    f1_m = float(sum(f1s ) / len(f1s ) )
    em = sum(ems ) / len(ems )
    f1_a = float(f1_score(y_true=labels , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue( datasets.Metric ):
def _info( self ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _get_feature_types( self ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _compute( self , predictions , references ):
    if self.config_name == "axb":
        return {"matthews_correlation": matthews_corrcoef(references , predictions )}
    elif self.config_name == "cb":
        return acc_and_f1(predictions , references , f1_avg="""macro""" )
    elif self.config_name == "record":
        dataset = [
            {
                """qas""": [
                    {"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
                    for ref in references
                ]
            }
        ]
        predictions = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
        return evaluate_record(dataset , predictions )[0]
    elif self.config_name == "multirc":
        return evaluate_multirc(predictions , references )
    elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
        return {"accuracy": simple_accuracy(predictions , references )}
    else:
        raise KeyError(
            """You should supply a configuration name selected in """
            """[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 38 | 0 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowercase_ = "src/transformers"
lowercase_ = "docs/source/en/tasks"
def _find_text_in_file( filename , start_prompt , end_prompt ):
    '''simple docstring'''
    with open(filename , 'r' , encoding='utf-8' , newline='\n' ) as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt ):
        start_index += 1
    start_index += 1
    end_index = start_index
    while not lines[end_index].startswith(end_prompt ):
        end_index += 1
    end_index -= 1
    while len(lines[start_index] ) <= 1:
        start_index += 1
    while len(lines[end_index] ) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)
TASK_GUIDE_TO_MODELS = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def get_model_list_for_task( task_guide ):
    '''simple docstring'''
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide , set() )
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()] ) + "\n"
def check_model_list_for_task( task_guide , overwrite=False ):
    '''simple docstring'''
    current_list , start_index , end_index , lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , start_prompt='<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->' , end_prompt='<!--End of the generated tip-->' , )
    new_list = get_model_list_for_task(task_guide )
    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES , task_guide ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ' to fix this.' )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 370 |
import comet # From: unbabel-comet
import torch
import datasets
lowercase_ = datasets.logging.get_logger(__name__)
lowercase_ = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
lowercase_ = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
lowercase_ = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Comet( datasets.Metric ):
"""simple docstring"""
def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION,citation=_CITATION,homepage='https://unbabel.github.io/COMET/html/index.html',inputs_description=_KWARGS_DESCRIPTION,features=datasets.Features(
{
'sources': datasets.Value('string',id='sequence' ),
'predictions': datasets.Value('string',id='sequence' ),
'references': datasets.Value('string',id='sequence' ),
} ),codebase_urls=['https://github.com/Unbabel/COMET'],reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
],)
def _download_and_prepare( self , dl_manager ):
    '''simple docstring'''
    if self.config_name == "default":
        self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
    else:
        self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def _compute( self , sources , predictions , references , gpus=None , progress_bar=False ):
    '''simple docstring'''
    if gpus is None:
        gpus = 1 if torch.cuda.is_available() else 0
    data = {'src': sources, 'mt': predictions, 'ref': references}
    data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
    scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
    return {"mean_score": mean_score, "scores": scores}
| 282 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class MockResponse:
status_code = 200
headers = {"Content-Length": "100"}
cookies = {}
def iter_content( self , **kwargs ):
    '''simple docstring'''
    return [bytes(CONTENT , 'utf-8' )]
def mock_request( *args , **kwargs ):
    return MockResponse()
@pytest.mark.parametrize('urls_type' , [str, list, dict] )
def test_download_manager_download( urls_type , tmp_path , monkeypatch ):
    import requests
    monkeypatch.setattr(requests , 'request' , mock_request )
    url = URL
    if issubclass(urls_type , str ):
        urls = url
    elif issubclass(urls_type , list ):
        urls = [url]
    elif issubclass(urls_type , dict ):
        urls = {'train': url}
    dataset_name = 'dummy'
    cache_subdir = 'downloads'
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root , cache_subdir ) , use_etag=True , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    downloaded_paths = dl_manager.download(urls )
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls , str ):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls , dict ):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths , input_urls ):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path )
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix('.json' )
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text() )
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize('paths_type' , [str, list, dict] )
def test_download_manager_extract( paths_type , xz_file , text_file ):
    filename = str(xz_file )
    if issubclass(paths_type , str ):
        paths = filename
    elif issubclass(paths_type , list ):
        paths = [filename]
    elif issubclass(paths_type , dict ):
        paths = {'train': filename}
    dataset_name = 'dummy'
    cache_dir = xz_file.parent
    extracted_subdir = 'extracted'
    download_config = DownloadConfig(
        cache_dir=cache_dir , use_etag=False , )
    dl_manager = DownloadManager(dataset_name=dataset_name , download_config=download_config )
    extracted_paths = dl_manager.extract(paths )
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths , str ):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths , dict ):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths , input_paths ):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path )
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path , etag=None )
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl( path , file ):
    assert path.endswith('.jsonl' )
    for num_items, line in enumerate(file , start=1 ):
        item = json.loads(line.decode('utf-8' ) )
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize('archive_jsonl' , ['tar_jsonl_path', 'zip_jsonl_path'] )
def test_iter_archive_path( archive_jsonl , request ):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl )
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path ) , start=1 ):
        _test_jsonl(path , file )
    assert num_jsonl == 2
@pytest.mark.parametrize('archive_nested_jsonl' , ['tar_nested_jsonl_path', 'zip_nested_jsonl_path'] )
def test_iter_archive_file( archive_nested_jsonl , request ):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl )
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path ) , start=1 ):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file ) , start=1 ):
            _test_jsonl(subpath , subfile )
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 143 | from __future__ import annotations
def fractional_knapsack(value, weight, capacity) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
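
# Illustrative check (added; the sample numbers are assumptions, not from the
# original file): ratios are [6.0, 5.0, 4.0], so the first two items are taken
# whole and 2/3 of the third fills the remaining capacity of 20.
assert fractional_knapsack([60, 100, 120], [10, 20, 30], 50) == (240.0, [1, 1, 2 / 3])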
if __name__ == "__main__":
import doctest
doctest.testmod()
| 143 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
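
    # Hedged, non-interactive check (added for illustration; the toy graph is an
    # assumption, not part of the original file): for a triangle 0-1 (w=1),
    # 1-2 (w=2), 0-2 (w=3), the MST keeps the two cheapest edges.
    example_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
        example_graph[u].append([v, w])
        example_graph[v].append([u, w])
    print(prisms_algorithm(example_graph))  # expected: [(0, 1), (1, 2)]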
| 352 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
    'processing_layoutlmv2': ['LayoutLMv2Processor'],
    'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_layoutlmv2_fast'] = ['LayoutLMv2TokenizerFast']
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['feature_extraction_layoutlmv2'] = ['LayoutLMv2FeatureExtractor']
    _import_structure['image_processing_layoutlmv2'] = ['LayoutLMv2ImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_layoutlmv2'] = [
        'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'LayoutLMv2ForQuestionAnswering',
        'LayoutLMv2ForSequenceClassification',
        'LayoutLMv2ForTokenClassification',
        'LayoutLMv2Layer',
        'LayoutLMv2Model',
        'LayoutLMv2PreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
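
# For context (added): the _LazyModule registration above defers all heavy imports
# until first use. A simplified, hypothetical sketch of the same idea with PEP 562's
# module-level __getattr__ (not transformers' actual implementation):
#
#     import importlib
#
#     def __getattr__(name):
#         for module_name, exported_names in _import_structure.items():
#             if name in exported_names:
#                 module = importlib.import_module(f".{module_name}", __name__)
#                 return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")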
| 90 | 0 |
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}  # maps model ids to the expected output slices below (keys not recoverable in this copy)
# fmt: off
UpperCAmelCase : int = torch.tensor([
-0.7515, -1.6883, 0.2420, 0.0300, 0.6347, 1.3433, -1.1743, -3.7467,
1.2342, -2.2485, 0.4636, 0.8076, -0.7991, 0.3969, 0.8498, 0.9189,
-1.8887, -3.3522, 0.7639, 0.2040, 0.6271, -2.7148, -1.6316, 3.0839,
0.3186, 0.2721, -0.9759, -1.2461, 2.6257, 1.3557
])
UpperCAmelCase : Optional[Any] = torch.tensor([
-2.3639, -2.5344, 0.0054, -0.6674, 1.5990, 1.0158, 0.3124, -2.1436,
1.8795, -2.5429, -0.1566, -0.3973, 1.2490, 2.6447, 1.2283, -0.5208,
-2.8154, -3.5119, 2.3838, 1.2033, 1.7201, -2.1256, -1.4576, 2.7948,
2.4204, -0.9752, -1.2546, 0.8027, 3.2758, 3.1365
])
UpperCAmelCase : str = torch.tensor([
-0.6531, -0.6891, -0.3172, -0.5375, -0.9140, -0.5367, -0.1175, -0.7869,
-0.3808, -0.4513, -0.2098, -0.0083, 0.3183, 0.5140, 0.2247, -0.1304,
-0.1302, -0.2802, -0.2084, -0.2025, -0.4967, -0.4873, -0.0861, 0.6925,
0.0250, 0.1290, -0.1543, 0.6316, 1.0460, 1.4943
])
UpperCAmelCase : Dict = torch.tensor([
0.0911, 0.1107, 0.0182, 0.0435, -0.0805, -0.0608, 0.0381, 0.2172,
-0.0280, 0.1327, -0.0299, -0.0255, -0.0050, -0.1170, -0.1046, 0.0309,
0.1367, 0.1728, -0.0533, -0.0748, -0.0534, 0.1624, 0.0384, -0.1805,
-0.0707, 0.0642, 0.0220, -0.0134, -0.1333, -0.1505
])
UpperCAmelCase : Dict = torch.tensor([
0.1321, 0.1337, 0.0440, 0.0622, -0.0591, -0.0370, 0.0503, 0.2133,
-0.0177, 0.1415, -0.0116, -0.0112, 0.0044, -0.0980, -0.0789, 0.0395,
0.1502, 0.1785, -0.0488, -0.0514, -0.0404, 0.1539, 0.0454, -0.1559,
-0.0665, 0.0659, 0.0383, -0.0005, -0.1266, -0.1386
])
UpperCAmelCase : Optional[Any] = torch.tensor([
0.1154, 0.1218, 0.0307, 0.0526, -0.0711, -0.0541, 0.0366, 0.2078,
-0.0267, 0.1317, -0.0226, -0.0193, -0.0014, -0.1055, -0.0902, 0.0330,
0.1391, 0.1709, -0.0562, -0.0693, -0.0560, 0.1482, 0.0381, -0.1683,
-0.0681, 0.0661, 0.0331, -0.0046, -0.1268, -0.1431
])
UpperCAmelCase : List[Any] = torch.tensor([
0.1192, 0.1240, 0.0414, 0.0606, -0.0557, -0.0412, 0.0430, 0.2042,
-0.0200, 0.1385, -0.0115, -0.0132, 0.0017, -0.0965, -0.0802, 0.0398,
0.1433, 0.1747, -0.0458, -0.0533, -0.0407, 0.1545, 0.0419, -0.1574,
-0.0645, 0.0626, 0.0341, -0.0010, -0.1199, -0.1390
])
UpperCAmelCase : int = torch.tensor([
0.1075, 0.1074, 0.0205, 0.0431, -0.0774, -0.0607, 0.0298, 0.2042,
-0.0320, 0.1267, -0.0281, -0.0250, -0.0064, -0.1091, -0.0946, 0.0290,
0.1328, 0.1650, -0.0580, -0.0738, -0.0586, 0.1440, 0.0337, -0.1746,
-0.0712, 0.0605, 0.0250, -0.0099, -0.1316, -0.1473
])
UpperCAmelCase : Any = torch.tensor([
-1.4572, -2.0481, -0.0414, -0.6005, 1.4136, 0.5848, 0.4028, -2.7330,
1.2212, -2.1228, 0.2155, 0.4039, 0.7662, 2.0535, 0.7477, -0.3243,
-2.1758, -2.7648, 1.6947, 0.7026, 1.2338, -1.6078, -0.8682, 2.2810,
1.8574, -0.5718, -0.5586, -0.0186, 2.3415, 2.1251])
UpperCAmelCase : Union[str, Any] = torch.tensor([
-1.3690, -1.9720, -0.4090, -0.6966, 1.4660, 0.9938, -0.1385, -2.7324,
0.7736, -1.8917, 0.2923, 0.4293, 0.1693, 1.4112, 1.1887, -0.3181,
-2.2160, -2.6381, 1.3170, 0.8163, 0.9240, -1.6544, -0.6099, 2.5259,
1.6430, -0.9090, -0.9392, -0.0126, 2.4268, 2.3266
])
UpperCAmelCase : List[Any] = torch.tensor([
-1.3525, -1.9628, -0.3956, -0.6860, 1.4664, 1.0014, -0.1259, -2.7212,
0.7772, -1.8811, 0.2996, 0.4388, 0.1704, 1.4029, 1.1701, -0.3027,
-2.2053, -2.6287, 1.3350, 0.8131, 0.9274, -1.6292, -0.6098, 2.5131,
1.6505, -0.8958, -0.9298, -0.0151, 2.4257, 2.3355
])
UpperCAmelCase : str = torch.tensor([
-2.0585, -2.7897, -0.2850, -0.8940, 1.9052, 0.5702, 0.6345, -3.8959,
1.5932, -3.2319, 0.1974, 0.0287, 1.7566, 2.6543, 0.8387, -0.5351,
-3.2736, -4.3375, 2.9029, 1.6390, 1.4640, -2.1701, -1.9013, 2.9341,
3.4981, -0.6255, -1.1644, -0.1591, 3.7097, 3.2066
])
UpperCAmelCase : Optional[Any] = torch.tensor([
-2.3139, -2.5594, -0.0197, -0.6785, 1.7001, 1.1606, 0.3075, -2.1740,
1.8071, -2.5630, -0.0926, -0.3811, 1.2116, 2.6246, 1.2731, -0.5398,
-2.8153, -3.6140, 2.3893, 1.3262, 1.6258, -2.1856, -1.3267, 2.8395,
2.3779, -1.0623, -1.2468, 0.8959, 3.3367, 3.2243
])
UpperCAmelCase : Optional[Any] = torch.tensor([
-2.0628, -2.7667, -0.2089, -0.8263, 2.0539, 0.5992, 0.6495, -3.8336,
1.6025, -3.2817, 0.1721, -0.0633, 1.7516, 2.7039, 0.8100, -0.5908,
-3.2113, -4.4343, 2.9257, 1.3632, 1.5562, -2.1489, -1.9894, 3.0560,
3.3396, -0.7328, -1.0417, 0.0383, 3.7093, 3.2343
])
UpperCAmelCase : Optional[int] = torch.tensor([
-1.4574, -2.0569, -0.0473, -0.6117, 1.4018, 0.5769, 0.4129, -2.7344,
1.2241, -2.1397, 0.2000, 0.3937, 0.7616, 2.0453, 0.7324, -0.3391,
-2.1746, -2.7744, 1.6963, 0.6921, 1.2187, -1.6172, -0.8877, 2.2439,
1.8471, -0.5839, -0.5605, -0.0464, 2.3250, 2.1219
])
# fmt: on
models = api.list_models(filter='diffusers')
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith('CompVis'):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder='unet')
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        assert torch.allclose(
            logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1E-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
| 115 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_biogpt""": ["""BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BioGptConfig"""],
"""tokenization_biogpt""": ["""BioGptTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"""BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BioGptForCausalLM""",
"""BioGptForTokenClassification""",
"""BioGptForSequenceClassification""",
"""BioGptModel""",
"""BioGptPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 59 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class to store the configuration of an RWKV model."""

    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}

    def __init__(
        self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1E-5, bos_token_id=0,
        eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
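
if __name__ == "__main__":
    # Hedged sanity check (added; assumes the RwkvConfig name restored above):
    # unset sizes fall back to values derived from hidden_size.
    config = RwkvConfig(hidden_size=512, num_hidden_layers=4)
    assert config.attention_hidden_size == 512  # defaults to hidden_size
    assert config.intermediate_size == 4 * 512  # defaults to 4 * hidden_size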
| 11 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n) -> float:
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
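
# Illustrative check (added; the numbers are assumptions, not from the original
# file): with n == 3 items, the two best-ratio items fit whole (value 160) and
# the remaining capacity of 20 takes 2/3 of the third item (value 80).
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0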
if __name__ == "__main__":
import doctest
doctest.testmod()
| 326 |
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url):
    """Fetch the direct video source for an Instagram post URL and return its bytes."""
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f'{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4'
with open(file_name, 'wb') as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 349 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class TimesformerConfig(PretrainedConfig):
    model_type = 'timesformer'

    def __init__(
        self, image_size=224, patch_size=16, num_channels=3, num_frames=8, hidden_size=768,
        num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu',
        hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
        layer_norm_eps=1e-6, qkv_bias=True, attention_type='divided_space_time', drop_path_rate=0,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
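
if __name__ == "__main__":
    # Hedged sanity check (added; assumes the TimesformerConfig name restored above).
    config = TimesformerConfig(num_frames=16)
    assert config.num_frames == 16 and config.attention_type == 'divided_space_time'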
| 183 |
'''simple docstring'''
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * " ")
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for word in words:
        if width + len(word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(word)
            width += len(word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [word], len(word)
    remaining_spaces = max_width - width - len(line)
    answer.append(" ".join(line) + (remaining_spaces + 1) * " ")
    return answer
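
# Worked example (added; the sample sentence is an assumption, not from the file):
# every returned line is exactly max_width characters wide, with extra spaces going
# to the leftmost gaps first, and the last line left-justified.
assert text_justification("This is an example of text justification.", 16) == [
    "This    is    an",
    "example  of text",
    "justification.  ",
]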
if __name__ == "__main__":
from doctest import testmod
testmod()
| 183 | 1 |
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
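
# Quick check (added; the sample list is an assumption): ties return every mode,
# sorted ascending.
assert mode([2, 2, 3, 3, 1]) == [2, 3]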
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = 'hf-internal-testing/tiny-random-t5'

        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()

        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))
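
    # Hedged usage sketch of the API exercised above (added; requires `optimum`
    # to be installed, as the @require_optimum marker indicates; the model id is
    # illustrative):
    #
    #   model = AutoModelForSeq2SeqLM.from_pretrained("hf-internal-testing/tiny-random-t5")
    #   model = model.to_bettertransformer()       # swap in fused attention kernels
    #   ...run inference...
    #   model = model.reverse_bettertransformer()  # restore the saveable architecture
    #   model.save_pretrained("local-dir")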
    def test_error_save_pretrained(self):
        model_id = 'hf-internal-testing/tiny-random-t5'

        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname) | 282 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()

# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
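
if __name__ == "__main__":
    # Hedged demo (added; the parameter path is illustrative, not from a real
    # checkpoint): _match slides the tuple of regexes over the flattened key,
    # so a rule matches any suffix window of the path.
    print(_match(("mlp", "c_fc", "kernel"), ("transformer", "h", "0", "mlp", "c_fc", "kernel")))  # True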
| 358 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class RobertaTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = RobertaTokenizer
    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, errors='replace', bos_token='<s>',
        eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>',
        mask_token='<mask>', add_prefix_space=False, trim_offsets=True, **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
            eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
            pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop('type'))
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if 'sep' in state:
                state['sep'] = tuple(state['sep'])
            if 'cls' in state:
                state['cls'] = tuple(state['cls'])

            changes_to_apply = False

            if state.get('add_prefix_space', add_prefix_space) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets', trim_offsets) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop('type'))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
| 170 | 0 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SHA1Hash:
    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
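
    # Worked example (added): the classic "abc" test vector matches the reference
    # SHA-1 digest.
    print(SHA1Hash(b"abc").final_hash())  # a9993e364706816aba3e25717850c26c9cd0d89d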
| 36 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True,
        crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None,
        image_std=None, do_convert_rgb=True, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None,
        do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None,
        do_convert_rgb=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
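
# Hedged usage sketch (added; assumes the CLIPImageProcessor name restored above):
#
#   from PIL import Image
#   processor = CLIPImageProcessor()
#   batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) after resize + center crop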
| 250 | 0 |
from maths.prime_factors import prime_factors
def liouville_lambda(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
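
# Quick checks (added): the value is (-1) raised to the number of prime factors
# counted with multiplicity, so a prime gives -1 and a semiprime gives 1.
assert liouville_lambda(7) == -1
assert liouville_lambda(10) == 1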
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
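
    # Additional check (added; the strings are illustrative): the failure array
    # lets the scan resume at a reusable prefix instead of restarting, which is
    # what makes the search O(len(text) + len(pattern)).
    assert kmp("aab", "baaab") and not kmp("aab", "abba")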
| 45 | 1 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 72 |
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
logger = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key
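
# Example of the renaming rule above (added for illustration): indexed segments
# like "layers.0" become "layers_0", matching Flax's flattened parameter naming.
#
#   rename_key("encoder.layers.0.weight")  ->  "encoder.layers_0.weight"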
def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert PyTorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict) | 286 | 0 |
"""simple docstring"""
import argparse
import hashlib
import os
import urllib
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
'''tiny.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt''',
'''tiny''': '''https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt''',
'''base.en''': '''https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt''',
'''base''': '''https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt''',
'''small.en''': '''https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt''',
'''small''': '''https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt''',
'''medium.en''': '''https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt''',
'''medium''': '''https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt''',
'''large''': '''https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt''',
'''large-v2''': '''https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt''',
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)


WHISPER_MAPPING = {
'''blocks''': '''layers''',
'''mlp.0''': '''fc1''',
'''mlp.2''': '''fc2''',
'''mlp_ln''': '''final_layer_norm''',
'''.attn.query''': '''.self_attn.q_proj''',
'''.attn.key''': '''.self_attn.k_proj''',
'''.attn.value''': '''.self_attn.v_proj''',
'''.attn_ln''': '''.self_attn_layer_norm''',
'''.attn.out''': '''.self_attn.out_proj''',
'''.cross_attn.query''': '''.encoder_attn.q_proj''',
'''.cross_attn.key''': '''.encoder_attn.k_proj''',
'''.cross_attn.value''': '''.encoder_attn.v_proj''',
'''.cross_attn_ln''': '''.encoder_attn_layer_norm''',
'''.cross_attn.out''': '''.encoder_attn.out_proj''',
'''decoder.ln.''': '''decoder.layer_norm.''',
'''encoder.ln.''': '''encoder.layer_norm.''',
'''token_embedding''': '''embed_tokens''',
'''encoder.positional_embedding''': '''encoder.embed_positions.weight''',
'''decoder.positional_embedding''': '''decoder.embed_positions.weight''',
'''ln_post''': '''layer_norm''',
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->bytes:
'''simple docstring'''
os.makedirs(_lowercase , exist_ok=_lowercase )
a : Dict = os.path.basename(_lowercase )
a : List[str] = url.split("/" )[-2]
a : Optional[int] = os.path.join(_lowercase , _lowercase )
if os.path.exists(_lowercase ) and not os.path.isfile(_lowercase ):
raise RuntimeError(F"""{download_target} exists and is not a regular file""" )
if os.path.isfile(_lowercase ):
a : str = open(_lowercase , "rb" ).read()
if hashlib.shaaaa(_lowercase ).hexdigest() == expected_shaaaa:
return model_bytes
else:
warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" )
with urllib.request.urlopen(_lowercase ) as source, open(_lowercase , "wb" ) as output:
with tqdm(
total=int(source.info().get("Content-Length" ) ) , ncols=80 , unit="iB" , unit_scale=_lowercase , unit_divisor=1024 ) as loop:
while True:
a : Optional[Any] = source.read(8192 )
if not buffer:
break
output.write(_lowercase )
loop.update(len(_lowercase ) )
a : Optional[Any] = open(_lowercase , "rb" ).read()
if hashlib.shaaaa(_lowercase ).hexdigest() != expected_shaaaa:
raise RuntimeError(
"Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." )
return model_bytes
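# End-to-end conversion: fetch or load the OpenAI checkpoint, rename its weights, and load them into a WhisperForConditionalGeneration before saving.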
def _SCREAMING_SNAKE_CASE ( _lowercase : Dict , _lowercase : Union[str, Any] ) ->List[str]:
'''simple docstring'''
if ".pt" not in checkpoint_path:
a : Optional[int] = _download(_MODELS[checkpoint_path] )
else:
a : Tuple = torch.load(_lowercase , map_location="cpu" )
a : str = original_checkpoint["dims"]
a : Optional[int] = original_checkpoint["model_state_dict"]
a : str = state_dict["decoder.token_embedding.weight"]
remove_ignore_keys_(_lowercase )
rename_keys(_lowercase )
a : List[Any] = True
a : Optional[int] = state_dict["decoder.layers.0.fc1.weight"].shape[0]
a : Tuple = WhisperConfig(
vocab_size=dimensions["n_vocab"] , encoder_ffn_dim=_lowercase , decoder_ffn_dim=_lowercase , num_mel_bins=dimensions["n_mels"] , d_model=dimensions["n_audio_state"] , max_target_positions=dimensions["n_text_ctx"] , encoder_layers=dimensions["n_audio_layer"] , encoder_attention_heads=dimensions["n_audio_head"] , decoder_layers=dimensions["n_text_layer"] , decoder_attention_heads=dimensions["n_text_state"] , max_source_positions=dimensions["n_audio_ctx"] , )
a : Union[str, Any] = WhisperForConditionalGeneration(_lowercase )
a : int = model.model.load_state_dict(_lowercase , strict=_lowercase )
if len(_lowercase ) > 0 and not set(_lowercase ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F""" but all the following weights are missing {missing}""" )
if tie_embeds:
a : List[str] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
a : List[Any] = proj_out_weights
model.save_pretrained(_lowercase )
if __name__ == "__main__":
a : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
    parser.add_argument('''--checkpoint_path''', type=str, help='''Path to the downloaded checkpoints''')
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
a : Union[str, Any] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
| 370 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( _lowercase : int = 100 ) ->int:
'''simple docstring'''
a : Dict = sum(i * i for i in range(1 , n + 1 ) )
a : Tuple = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F'''{solution() = }''')
| 79 | 0 |
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
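# Tester that builds a tiny UMT5 configuration plus synthetic inputs shared by the test cases below.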
class __lowerCAmelCase :
def __init__( self :Optional[Any] , __magic_name__ :List[Any] , __magic_name__ :Optional[int]=99 , __magic_name__ :Any=13 , __magic_name__ :List[str]=7 , __magic_name__ :str=9 , __magic_name__ :Union[str, Any]=True , __magic_name__ :Optional[int]=True , __magic_name__ :Optional[int]=False , __magic_name__ :Union[str, Any]=32 , __magic_name__ :Union[str, Any]=5 , __magic_name__ :Any=4 , __magic_name__ :Optional[Any]=37 , __magic_name__ :Any=8 , __magic_name__ :Union[str, Any]=0.1 , __magic_name__ :List[Any]=0.002 , __magic_name__ :int=1 , __magic_name__ :Tuple=0 , __magic_name__ :List[Any]=0 , __magic_name__ :List[str]=None , __magic_name__ :Optional[int]=None , ):
'''simple docstring'''
a = parent
a = batch_size
a = encoder_seq_length
a = decoder_seq_length
# For common tests
a = self.decoder_seq_length
a = is_training
a = use_attention_mask
a = use_labels
a = vocab_size
a = hidden_size
a = num_hidden_layers
a = num_attention_heads
a = d_ff
a = relative_attention_num_buckets
a = dropout_rate
a = initializer_factor
a = eos_token_id
a = pad_token_id
a = decoder_start_token_id
a = None
a = decoder_layers
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def lowerCamelCase__ ( self :Dict , __magic_name__ :Dict , __magic_name__ :List[Any] , __magic_name__ :int , __magic_name__ :int=None , __magic_name__ :Dict=None , __magic_name__ :Union[str, Any]=None , __magic_name__ :List[Any]=None , __magic_name__ :Optional[int]=None , ):
'''simple docstring'''
if attention_mask is None:
a = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
a = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
a = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__magic_name__ )
if decoder_head_mask is None:
a = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__magic_name__ )
if cross_attn_head_mask is None:
a = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__magic_name__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
a = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
a = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and, in turn, in
# position_ids being off by num_pad_tokens in past input
a = input_ids.clamp(self.pad_token_id + 1 )
a = decoder_input_ids.clamp(self.pad_token_id + 1 )
a = self.get_config()
a = config.num_attention_heads
a = self.prepare_inputs_dict(__magic_name__ , __magic_name__ , __magic_name__ )
return config, input_dict
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
a , a = self.prepare_config_and_inputs()
return config, inputs_dict
def lowerCamelCase__ ( self :Optional[int] ):
'''simple docstring'''
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self :Any ):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowerCamelCase__ ( self :Optional[Any] , __magic_name__ :Optional[int] , __magic_name__ :Optional[int] , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Any , __magic_name__ :Optional[Any] , ):
'''simple docstring'''
a = UMTaModel(config=__magic_name__ )
model.to(__magic_name__ )
model.eval()
a = model(
input_ids=__magic_name__ , decoder_input_ids=__magic_name__ , attention_mask=__magic_name__ , decoder_attention_mask=__magic_name__ , )
a = model(input_ids=__magic_name__ , decoder_input_ids=__magic_name__ )
a = result.last_hidden_state
a = result.past_key_values
a = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__magic_name__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def lowerCamelCase__ ( self :Union[str, Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :List[Any] , __magic_name__ :List[str] , __magic_name__ :Optional[Any] , ):
'''simple docstring'''
a = UMTaModel(config=__magic_name__ ).get_decoder().to(__magic_name__ ).eval()
# first forward pass
a = model(__magic_name__ , use_cache=__magic_name__ )
a = model(__magic_name__ )
a = model(__magic_name__ , use_cache=__magic_name__ )
self.parent.assertTrue(len(__magic_name__ ) == len(__magic_name__ ) )
self.parent.assertTrue(len(__magic_name__ ) == len(__magic_name__ ) + 1 )
a , a = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
a = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append next_tokens to input_ids
a = torch.cat([input_ids, next_tokens] , dim=-1 )
a = model(__magic_name__ )["""last_hidden_state"""]
a = model(__magic_name__ , past_key_values=__magic_name__ )["""last_hidden_state"""]
# select random slice
a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
a = output_from_no_past[:, -1, random_slice_idx].detach()
a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__magic_name__ , __magic_name__ , atol=1E-3 ) )
def lowerCamelCase__ ( self :str , __magic_name__ :Any , __magic_name__ :Union[str, Any] , ):
'''simple docstring'''
a = UMTaModel(config=__magic_name__ ).to(__magic_name__ ).half().eval()
a = model(**__magic_name__ )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__magic_name__ ).any().item() )
@require_torch
class __lowerCAmelCase ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
UpperCamelCase__ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
UpperCamelCase__ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'''conversational''': UMTaForConditionalGeneration,
'''feature-extraction''': UMTaModel,
'''summarization''': UMTaForConditionalGeneration,
'''text2text-generation''': UMTaForConditionalGeneration,
'''translation''': UMTaForConditionalGeneration,
'''question-answering''': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = True
UpperCamelCase__ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
UpperCamelCase__ = [0.8, 0.9]
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
a = UMTaModel(config_and_inputs[0] ).to(__magic_name__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__magic_name__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F'{tmpdirname}/t5_test.onnx' , export_params=__magic_name__ , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def lowerCamelCase__ ( self :List[str] ):
'''simple docstring'''
a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__magic_name__ )
def lowerCamelCase__ ( self :str ):
'''simple docstring'''
a = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
a = self.model_tester.prepare_config_and_inputs()
a = config_and_inputs[0]
a = UMTaForConditionalGeneration(__magic_name__ ).eval()
model.to(__magic_name__ )
a = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__magic_name__ ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__magic_name__ ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__magic_name__ ),
}
for attn_name, (name, mask) in zip(__magic_name__ , head_masking.items() ):
a = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
a = torch.ones(
config.num_decoder_layers , config.num_heads , device=__magic_name__ )
a = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__magic_name__ , return_dict_in_generate=__magic_name__ , **__magic_name__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
a = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def lowerCamelCase__ ( self :Tuple ):
'''simple docstring'''
a = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__magic_name__ ).to(__magic_name__ )
a = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__magic_name__ , legacy=__magic_name__ )
a = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
a = tokenizer(__magic_name__ , return_tensors="""pt""" , padding=__magic_name__ ).input_ids
# fmt: off
a = torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(__magic_name__ , __magic_name__ )
a = model.generate(input_ids.to(__magic_name__ ) )
a = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
a = tokenizer.batch_decode(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 228 |
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__UpperCamelCase : str = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
__UpperCamelCase : Union[str, Any] = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
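# Follows the dotted `key` attribute path on the Hugging Face model and copies `value` into the matching weight, bias, weight_g or weight_v slot.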
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
a = getattr(__lowerCamelCase , __lowerCamelCase )
if weight_type is not None:
a = getattr(__lowerCamelCase , __lowerCamelCase ).shape
else:
a = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
a = value
elif weight_type == "weight_g":
a = value
elif weight_type == "weight_v":
a = value
elif weight_type == "bias":
a = value
else:
a = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
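# Walks the fairseq state dict, routing conv feature-extractor and adapter weights to dedicated loaders and mapping everything else through MAPPING.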
def __A ( __lowerCamelCase , __lowerCamelCase ) -> int:
a = []
a = fairseq_model.state_dict()
a = hf_model.feature_extractor
a = hf_model.adapter
for name, value in fairseq_dict.items():
a = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
a = True
elif any(x in name for x in ["""adaptor""", """w2v_encoder.proj.""", """w2v_proj_ln."""] ):
load_adapter(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
a = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
a = True
if "*" in mapped_key:
a = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
a = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
a = """weight_g"""
elif "weight_v" in name:
a = """weight_v"""
elif "bias" in name:
a = """bias"""
elif "weight" in name:
a = """weight"""
else:
a = None
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
continue
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
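# Loads a single convolutional feature-extractor layer, distinguishing conv parameters (type_id 0) from layer-norm parameters (type_id 2).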
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Tuple:
a = full_name.split("""conv_layers.""" )[-1]
a = name.split(""".""" )
a = int(items[0] )
a = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
a = value
                logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
a = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
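# Loads adapter weights: the projection and its layer norm, plus the numbered adapter conv layers.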
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> Any:
a = full_name.split("""adaptor.""" )[-1]
a = name.split(""".""" )
if items[1].isdigit():
a = int(items[1] )
else:
a = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
a = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
a = value
logger.info(f'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
a = value
logger.info(f'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), f'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
a = value
            logger.info(f'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(__lowerCamelCase )
def __A ( __lowerCamelCase ) -> Tuple:
a , a = emb.weight.shape
a = nn.Linear(__lowerCamelCase , __lowerCamelCase , bias=__lowerCamelCase )
a = emb.weight.data
return lin_layer
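# Assembles a SpeechEncoderDecoderModel from a fairseq wav2vec2 encoder checkpoint and an mBART-50 decoder, then saves model, tokenizer and feature extractor.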
@torch.no_grad()
def __A ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , ) -> Optional[int]:
a = WavaVecaConfig.from_pretrained(
__lowerCamelCase , add_adapter=__lowerCamelCase , adapter_stride=__lowerCamelCase , adapter_kernel_size=__lowerCamelCase , use_auth_token=__lowerCamelCase , output_hidden_size=__lowerCamelCase , )
a = MBartConfig.from_pretrained(__lowerCamelCase )
# load model
a , a , a = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
"""config_yaml""": config_yaml_path,
"""data""": """/""".join(dict_path.split("""/""" )[:-1] ),
"""w2v_path""": checkpoint_path,
"""load_pretrained_decoder_from""": None,
} , )
a = model[0].eval()
# load feature extractor
a = WavaVecaFeatureExtractor.from_pretrained(__lowerCamelCase , use_auth_token=__lowerCamelCase )
# set weights for wav2vec2 encoder
a = WavaVecaModel(__lowerCamelCase )
recursively_load_weights_wavaveca(model.encoder , __lowerCamelCase )
# load decoder weights
a = MBartForCausalLM(__lowerCamelCase )
a , a = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__lowerCamelCase )
logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
a = SpeechEncoderDecoderModel(encoder=__lowerCamelCase , decoder=__lowerCamelCase )
a = False
a = MBartaaTokenizer(__lowerCamelCase )
tokenizer.save_pretrained(__lowerCamelCase )
a = hf_wavavec.config.to_dict()
a = tokenizer.pad_token_id
a = tokenizer.bos_token_id
a = tokenizer.eos_token_id
a = """mbart50"""
a = """wav2vec2"""
a = tokenizer.eos_token_id
a = 25_0004
a = tokenizer.eos_token_id
a = SpeechEncoderDecoderConfig.from_dict(__lowerCamelCase )
hf_wavavec.save_pretrained(__lowerCamelCase )
feature_extractor.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-xls-r-1b",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/mbart-large-50-one-to-many-mmt",
type=str,
help="Path to hf decoder checkpoint config",
)
parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers")
parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers")
parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers")
parser.add_argument("--encoder_output_dim", default=1_024, type=int, help="encoder output dim")
parser.add_argument("--start_token_id", default=250_004, type=int, help="`decoder_start_token_id` of model config")
__UpperCamelCase : int = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 228 | 1 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
A__ : int = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
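# Every collected test without an explicit "integration" or "unit" marker is treated as a unit test.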
def _snake_case ( lowerCamelCase__ : Dict , lowerCamelCase__ : str ) -> Any:
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def _snake_case ( lowerCamelCase__ : List[Any] ) -> int:
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=_lowerCamelCase )
def _snake_case ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any ) -> Union[str, Any]:
lowerCamelCase_ : int =tmp_path_factory.getbasetemp() / "cache"
lowerCamelCase_ : Any =test_hf_cache_home / "datasets"
lowerCamelCase_ : int =test_hf_cache_home / "metrics"
lowerCamelCase_ : List[str] =test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(_lowerCamelCase ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(_lowerCamelCase ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(_lowerCamelCase ) )
lowerCamelCase_ : Dict =test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(_lowerCamelCase ) )
lowerCamelCase_ : List[str] =test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(_lowerCamelCase ) )
@pytest.fixture(autouse=_lowerCamelCase , scope="session" )
def _snake_case ( ) -> List[str]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=_lowerCamelCase )
def _snake_case ( lowerCamelCase__ : List[str] ) -> Optional[Any]:
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , _lowerCamelCase )
@pytest.fixture
def _snake_case ( lowerCamelCase__ : Tuple ) -> Union[str, Any]:
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , _lowerCamelCase )
| 370 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : list[int] ) -> bool:
    # 1. Validate that an edge exists between the current and next vertices
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int ) -> bool:
# Base Case
if curr_ind == len(lowerCamelCase__ ):
        # return whether an edge exists between the current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(lowerCamelCase__ ) ):
if valid_connection(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# Insert current vertex into path as next transition
lowerCamelCase_ : Tuple =next_ver
# Validate created path
if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
lowerCamelCase_ : int =-1
return False
def _snake_case ( lowerCamelCase__ : list[list[int]] , lowerCamelCase__ : int = 0 ) -> list[int]:
lowerCamelCase_ : Optional[Any] =[-1] * (len(lowerCamelCase__ ) + 1)
# initialize start and end of path with starting index
lowerCamelCase_ : Optional[int] =start_index
    # evaluate; return the path if a Hamiltonian cycle is found, otherwise return an empty list
return path if util_hamilton_cycle(lowerCamelCase__ , lowerCamelCase__ , 1 ) else []
| 209 | 0 |
'''simple docstring'''
# flake8: noqa
# Lint as: python3
__SCREAMING_SNAKE_CASE :Optional[Any] = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 22 | """simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44 | 0 |
from __future__ import annotations
def UpperCamelCase ( _A ):
"""simple docstring"""
if not nums:
raise ValueError("""List is empty""" )
return sum(_A ) / len(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
__magic_name__: Any = None
__magic_name__: Dict = logging.get_logger(__name__)
__magic_name__: Any = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__magic_name__: str = {
"vocab_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
},
"tokenizer_file": {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
},
}
__magic_name__: Optional[Any] = {
"xlnet-base-cased": None,
"xlnet-large-cased": None,
}
__magic_name__: Optional[Any] = "▁"
# Segments (not really needed)
__magic_name__: List[Any] = 0
__magic_name__: Dict = 1
__magic_name__: List[str] = 2
__magic_name__: List[Any] = 3
__magic_name__: Optional[int] = 4
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Dict = VOCAB_FILES_NAMES
lowercase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[str] = '''left'''
lowercase__ : List[str] = XLNetTokenizer
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=False , lowerCAmelCase__=True , lowerCAmelCase__=False , lowerCAmelCase__="<s>" , lowerCAmelCase__="</s>" , lowerCAmelCase__="<unk>" , lowerCAmelCase__="<sep>" , lowerCAmelCase__="<pad>" , lowerCAmelCase__="<cls>" , lowerCAmelCase__="<mask>" , lowerCAmelCase__=["<eop>", "<eod>"] , **lowerCAmelCase__ , ) -> Tuple:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
vocab_file=lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , remove_space=lowerCAmelCase__ , keep_accents=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , **lowerCAmelCase__ , )
__magic_name__ : List[str] = 3
__magic_name__ : str = do_lower_case
__magic_name__ : Union[str, Any] = remove_space
__magic_name__ : str = keep_accents
__magic_name__ : Tuple = vocab_file
__magic_name__ : List[Any] = False if not self.vocab_file else True
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : Any = [self.sep_token_id]
__magic_name__ : Any = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> List[int]:
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : Optional[Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __magic_name__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : List[str] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCAmelCase__ ):
copyfile(self.vocab_file , lowerCAmelCase__ )
return (out_vocab_file,)
| 138 | 0 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
# if input_string is "aba" than new_input_string become "a|b|a"
__SCREAMING_SNAKE_CASE = ""
__SCREAMING_SNAKE_CASE = ""
# append each character + "|" in new_string for range(0, length-1)
for i in input_string[: len(SCREAMING_SNAKE_CASE__ ) - 1]:
new_input_string += i + "|"
# append last character
new_input_string += input_string[-1]
    # we will store the start and end of the furthest-ending palindromic
    # substring found so far
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 0
# length[i] shows the length of palindromic substring with center i
__SCREAMING_SNAKE_CASE = [1 for i in range(len(SCREAMING_SNAKE_CASE__ ) )]
# for each character in new_string find corresponding palindromic string
__SCREAMING_SNAKE_CASE = 0
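    # [l, r] is the right-most palindrome window seen so far; k is seeded from the mirror position inside it, then expanded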
for j in range(len(SCREAMING_SNAKE_CASE__ ) ):
__SCREAMING_SNAKE_CASE = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 )
while (
j - k >= 0
and j + k < len(SCREAMING_SNAKE_CASE__ )
and new_input_string[k + j] == new_input_string[j - k]
):
k += 1
__SCREAMING_SNAKE_CASE = 2 * k - 1
        # does this palindrome end after the previously explored end (that is, r)?
        # if yes, update r to the last index of this palindrome
if j + k - 1 > r:
__SCREAMING_SNAKE_CASE = j - k + 1 # noqa: E741
__SCREAMING_SNAKE_CASE = j + k - 1
# update max_length and start position
if max_length < length[j]:
__SCREAMING_SNAKE_CASE = length[j]
__SCREAMING_SNAKE_CASE = j
# create that string
__SCREAMING_SNAKE_CASE = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
for i in s:
if i != "|":
output_string += i
return output_string
if __name__ == "__main__":
import doctest
doctest.testmod()
| 54 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
lowercase_ = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
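# Thin wrapper around torch.onnx.export that passes the compatibility flags required before torch 1.11.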
def _snake_case( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : tuple , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any]=False , ) -> Union[str, Any]:
'''simple docstring'''
output_path.parent.mkdir(parents=SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , use_external_data_format=SCREAMING_SNAKE_CASE__ , enable_onnx_checker=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , )
else:
export(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , f=output_path.as_posix() , input_names=SCREAMING_SNAKE_CASE__ , output_names=SCREAMING_SNAKE_CASE__ , dynamic_axes=SCREAMING_SNAKE_CASE__ , do_constant_folding=SCREAMING_SNAKE_CASE__ , opset_version=SCREAMING_SNAKE_CASE__ , )
@torch.no_grad()
def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : bool = False ) -> Tuple:
'''simple docstring'''
A__ = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
A__ = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
A__ = 'cpu'
A__ = Path(SCREAMING_SNAKE_CASE__ )
# VAE DECODER
A__ = AutoencoderKL.from_pretrained(model_path + '/vae' )
A__ = vae_decoder.config.latent_channels
# forward only through the decoder part
A__ = vae_decoder.decode
onnx_export(
SCREAMING_SNAKE_CASE__ , model_args=(
torch.randn(1 , SCREAMING_SNAKE_CASE__ , 25 , 25 ).to(device=SCREAMING_SNAKE_CASE__ , dtype=SCREAMING_SNAKE_CASE__ ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=SCREAMING_SNAKE_CASE__ , )
del vae_decoder
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
lowercase_ = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print("SD: Done: ONNX")
| 7 | 0 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: List[str] , lowerCAmelCase: List[str] ) -> Optional[Any]:
_UpperCAmelCase : Any = len(_lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for i in range(len(_lowerCAmelCase ) - pat_len + 1 ):
_UpperCAmelCase : Dict = True
for j in range(_lowerCAmelCase ):
if s[i + j] != pattern[j]:
_UpperCAmelCase : str = False
break
if match_found:
position.append(_lowerCAmelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search('ABCDEFG', 'DE') == [3]
print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
| 354 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: str , lowerCAmelCase: str ) -> bool:
_UpperCAmelCase : Optional[Any] = len(lowerCAmelCase ) + 1
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_UpperCAmelCase : List[str] = [[0 for i in range(lowerCAmelCase )] for j in range(lowerCAmelCase )]
    # since a string of zero length matches a pattern of zero length
_UpperCAmelCase : List[Any] = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , lowerCAmelCase ):
_UpperCAmelCase : Dict = 0
    # a string of zero length matches a pattern only if each of its
    # characters is followed by a "*"
for j in range(1 , lowerCAmelCase ):
_UpperCAmelCase : Tuple = dp[0][j - 2] if pattern[j - 1] == "*" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , lowerCAmelCase ):
for j in range(1 , lowerCAmelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_UpperCAmelCase : Optional[Any] = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_UpperCAmelCase : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_UpperCAmelCase : str = dp[i - 1][j]
else:
_UpperCAmelCase : int = 0
else:
_UpperCAmelCase : List[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
SCREAMING_SNAKE_CASE_ = 'aab'
SCREAMING_SNAKE_CASE_ = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'''{input_string} matches the given pattern {pattern}''')
else:
print(F'''{input_string} does not match with the given pattern {pattern}''')
| 189 | 0 |
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowerCAmelCase__ = '''src/diffusers'''
lowerCAmelCase__ = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowerCAmelCase__ = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowerCAmelCase__ = spec.loader.load_module()
def _A ( A__ , A__ ):
"""simple docstring"""
return line.startswith(A__ ) or len(A__ ) <= 1 or re.search(R'''^\s*\)(\s*->.*:|:)\s*$''' , A__ ) is not None
def _A ( A__ ):
"""simple docstring"""
__lowercase = object_name.split('''.''' )
__lowercase = 0
# First let's find the module where our object lives.
__lowercase = parts[i]
while i < len(A__ ) and not os.path.isfile(os.path.join(A__ , F"{module}.py" ) ):
i += 1
if i < len(A__ ):
__lowercase = os.path.join(A__ , parts[i] )
if i >= len(A__ ):
raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(A__ , F"{module}.py" ) , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase = f.readlines()
# Now let's find the class / func in the code!
__lowercase = ''''''
__lowercase = 0
for name in parts[i + 1 :]:
while (
line_index < len(A__ ) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(A__ ):
raise ValueError(F" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__lowercase = line_index
while line_index < len(A__ ) and _should_continue(lines[line_index] , A__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__lowercase = lines[start_index:line_index]
return "".join(A__ )
lowerCAmelCase__ = re.compile(R'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowerCAmelCase__ = re.compile(R'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowerCAmelCase__ = re.compile(R'''<FILL\s+[^>]*>''')
def _A ( A__ ):
"""simple docstring"""
__lowercase = code.split('''\n''' )
__lowercase = 0
while idx < len(A__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(A__ ):
return re.search(R'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def _A ( A__ ):
"""simple docstring"""
__lowercase = len(get_indent(A__ ) ) > 0
if has_indent:
__lowercase = F"class Bla:\n{code}"
__lowercase = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=A__ )
__lowercase = black.format_str(A__ , mode=A__ )
__lowercase , __lowercase = style_docstrings_in_code(A__ )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def _A ( A__ , A__=False ):
"""simple docstring"""
with open(A__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__lowercase = f.readlines()
__lowercase = []
__lowercase = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(A__ ):
__lowercase = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__lowercase , __lowercase , __lowercase = search.groups()
__lowercase = find_code_in_diffusers(A__ )
__lowercase = get_indent(A__ )
__lowercase = line_index + 1 if indent == theoretical_indent else line_index + 2
__lowercase = theoretical_indent
__lowercase = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__lowercase = True
while line_index < len(A__ ) and should_continue:
line_index += 1
if line_index >= len(A__ ):
break
__lowercase = lines[line_index]
__lowercase = _should_continue(A__ , A__ ) and re.search(F"^{indent}# End copy" , A__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__lowercase = lines[start_index:line_index]
__lowercase = ''''''.join(A__ )
# Remove any nested `Copied from` comments to avoid circular copies
__lowercase = [line for line in theoretical_code.split('''\n''' ) if _re_copy_warning.search(A__ ) is None]
__lowercase = '''\n'''.join(A__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(A__ ) > 0:
__lowercase = replace_pattern.replace('''with''' , '''''' ).split(''',''' )
__lowercase = [_re_replace_pattern.search(A__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__lowercase , __lowercase , __lowercase = pattern.groups()
__lowercase = re.sub(A__ , A__ , A__ )
if option.strip() == "all-casing":
__lowercase = re.sub(obja.lower() , obja.lower() , A__ )
__lowercase = re.sub(obja.upper() , obja.upper() , A__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__lowercase = blackify(lines[start_index - 1] + theoretical_code )
__lowercase = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__lowercase = lines[:start_index] + [theoretical_code] + lines[line_index:]
__lowercase = start_index + 1
if overwrite and len(A__ ) > 0:
# Warn the user a file has been modified.
print(F"Detected changes, rewriting {filename}." )
with open(A__ , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(A__ )
return diffs
def _A ( A__ = False ):
"""simple docstring"""
__lowercase = glob.glob(os.path.join(A__ , '''**/*.py''' ) , recursive=A__ )
__lowercase = []
for filename in all_files:
__lowercase = is_copy_consistent(A__ , A__ )
diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(A__ ) > 0:
__lowercase = '''\n'''.join(A__ )
raise Exception(
'''Found the following copy inconsistencies:\n'''
+ diff
+ '''\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.''' )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowerCAmelCase__ = parser.parse_args()
check_copies(args.fix_and_overwrite)
| 104 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
lowerCAmelCase__ = parser.parse_args()
lowerCAmelCase__ = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
lowerCAmelCase__ = CLIPImageProcessor()
lowerCAmelCase__ = CLIPVisionModelWithProjection.from_pretrained('''openai/clip-vit-large-patch14''')
lowerCAmelCase__ = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 104 | 1 |
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER', 'False' ) ) is not True, reason='Skipping test because should only be run when releasing minor transformers version', )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_600, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
def snake_case__ ( self ):
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=lowerCamelCase__ , )
assert hasattr(self , '''env''' )
def snake_case__ ( self , lowerCamelCase__ ):
# configuration for running training on smdistributed Model Parallel
_lowerCamelCase = {
'''enabled''': True,
'''processes_per_host''': 8,
}
_lowerCamelCase = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
_lowerCamelCase = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
_lowerCamelCase = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=lowerCamelCase__ , instance_type=self.instance_type , debugger_hook_config=lowerCamelCase__ , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_0_0,
} , metric_definitions=self.env.metric_definitions , distribution=lowerCamelCase__ , py_version='''py36''' , )
def snake_case__ ( self , lowerCamelCase__ ):
TrainingJobAnalytics(lowerCamelCase__ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def snake_case__ ( self , lowerCamelCase__ ):
# create estimator
_lowerCamelCase = self.create_estimator(lowerCamelCase__ )
# run training
estimator.fit()
# result dataframe
_lowerCamelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
_lowerCamelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
_lowerCamelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCamelCase__ )
| 73 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''sail/poolformer_s12''': '''https://huggingface.co/sail/poolformer_s12/resolve/main/config.json''',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = 'poolformer'
def __init__( self , lowerCamelCase__=3 , lowerCamelCase__=1_6 , lowerCamelCase__=1_6 , lowerCamelCase__=3 , lowerCamelCase__=4.0 , lowerCamelCase__=[2, 2, 6, 2] , lowerCamelCase__=[6_4, 1_2_8, 3_2_0, 5_1_2] , lowerCamelCase__=[7, 3, 3, 3] , lowerCamelCase__=[4, 2, 2, 2] , lowerCamelCase__=[2, 1, 1, 1] , lowerCamelCase__=4 , lowerCamelCase__=0.0 , lowerCamelCase__="gelu" , lowerCamelCase__=True , lowerCamelCase__=1e-5 , lowerCamelCase__=0.0_2 , **lowerCamelCase__ , ):
_lowerCamelCase = num_channels
_lowerCamelCase = patch_size
_lowerCamelCase = stride
_lowerCamelCase = padding
_lowerCamelCase = pool_size
_lowerCamelCase = hidden_sizes
_lowerCamelCase = mlp_ratio
_lowerCamelCase = depths
_lowerCamelCase = patch_sizes
_lowerCamelCase = strides
_lowerCamelCase = num_encoder_blocks
_lowerCamelCase = drop_path_rate
_lowerCamelCase = hidden_act
_lowerCamelCase = use_layer_scale
_lowerCamelCase = layer_scale_init_value
_lowerCamelCase = initializer_range
super().__init__(**lowerCamelCase__ )
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : str = version.parse('1.11' )
@property
def snake_case__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def snake_case__ ( self ):
return 2e-3
| 73 | 1 |
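The ONNX config above declares a single `pixel_values` input with dynamic batch, channel, and spatial axes, and a validation tolerance of 2e-3. A small sketch of how that axes mapping is typically consumed; the commented export call is an assumed usage pattern, not something shown in the snippet.

# Sketch: the dynamic-axes mapping the ONNX config above exposes, in the shape
# torch.onnx.export expects.
from collections import OrderedDict

onnx_inputs = OrderedDict(
    [("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]
)
# torch.onnx.export(model, dummy_pixel_values, "poolformer.onnx",
#                   input_names=list(onnx_inputs), dynamic_axes=dict(onnx_inputs))
print(dict(onnx_inputs))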
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE :int = '''bart'''
SCREAMING_SNAKE_CASE :int = True
@st.cache(allow_output_mutation=lowerCAmelCase_ )
def _lowerCAmelCase ( )->Optional[int]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
snake_case_ = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
snake_case_ = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
snake_case_ = qar_model.eval()
else:
snake_case_ = (None, None)
if MODEL_TYPE == "bart":
snake_case_ = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
snake_case_ = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
snake_case_ = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
snake_case_ = sas_model.eval()
else:
snake_case_ = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=lowerCAmelCase_ )
def _lowerCAmelCase ( )->Union[str, Any]:
'''simple docstring'''
if LOAD_DENSE_INDEX:
snake_case_ = faiss.StandardGpuResources()
snake_case_ = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["""train"""]
snake_case_ = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 128) , )
snake_case_ = faiss.IndexFlatIP(128 )
snake_case_ = faiss.index_cpu_to_gpu(lowerCAmelCase_ , 1 , lowerCAmelCase_ )
wikiaab_gpu_index_flat.add(lowerCAmelCase_ ) # TODO fix for larger GPU
else:
snake_case_ = (None, None)
snake_case_ = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=lowerCAmelCase_ )
def _lowerCAmelCase ( )->str:
'''simple docstring'''
snake_case_ = datasets.load_dataset("eli5" , name="LFQA_reddit" )
snake_case_ = elia["""train_eli5"""]
snake_case_ = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 128) )
snake_case_ = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(lowerCAmelCase_ )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE :List[Any] = load_indexes()
SCREAMING_SNAKE_CASE :Dict = load_models()
SCREAMING_SNAKE_CASE :str = load_train_data()
def _lowerCAmelCase ( lowerCAmelCase_ :int , lowerCAmelCase_ :Any=10 )->Any:
'''simple docstring'''
snake_case_ = embed_questions_for_retrieval([question] , lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = eli5_train_q_index.search(lowerCAmelCase_ , lowerCAmelCase_ )
snake_case_ = [elia_train[int(lowerCAmelCase_ )] for i in I[0]]
return nn_examples
def _lowerCAmelCase ( lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :List[str]="wiki40b" , lowerCAmelCase_ :str="dense" , lowerCAmelCase_ :Tuple=10 )->List[str]:
'''simple docstring'''
if source == "none":
snake_case_ = (""" <P> """.join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
snake_case_ = query_qa_dense_index(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
snake_case_ = query_es_index(
lowerCAmelCase_ , lowerCAmelCase_ , index_name="english_wiki40b_snippets_100w" , n_results=lowerCAmelCase_ , )
snake_case_ = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
snake_case_ = """question: {} context: {}""".format(lowerCAmelCase_ , lowerCAmelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda lowerCAmelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda lowerCAmelCase_ : None),
} )
def _lowerCAmelCase ( lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Union[str, Any] , lowerCAmelCase_ :List[str] , lowerCAmelCase_ :Optional[Any]=64 , lowerCAmelCase_ :int=256 , lowerCAmelCase_ :Dict=False , lowerCAmelCase_ :str=2 , lowerCAmelCase_ :str=0.9_5 , lowerCAmelCase_ :Dict=0.8 )->Dict:
'''simple docstring'''
with torch.no_grad():
snake_case_ = qa_sas_generate(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , num_answers=1 , num_beams=lowerCAmelCase_ , min_len=lowerCAmelCase_ , max_len=lowerCAmelCase_ , do_sample=lowerCAmelCase_ , temp=lowerCAmelCase_ , top_p=lowerCAmelCase_ , top_k=lowerCAmelCase_ , max_input_length=1_024 , device="cuda:0" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
SCREAMING_SNAKE_CASE :List[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
SCREAMING_SNAKE_CASE :Union[str, Any] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE :List[str] = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE :Dict = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
SCREAMING_SNAKE_CASE :Tuple = st.sidebar.checkbox('''Demo options''')
if demo_options:
SCREAMING_SNAKE_CASE :str = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
SCREAMING_SNAKE_CASE :str = action_list.index(action_st)
SCREAMING_SNAKE_CASE :int = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
SCREAMING_SNAKE_CASE :str = show_type == '''Show full text of passages'''
else:
SCREAMING_SNAKE_CASE :str = 3
SCREAMING_SNAKE_CASE :List[Any] = True
SCREAMING_SNAKE_CASE :Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
SCREAMING_SNAKE_CASE :Any = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by a sequence-to-sequence model that takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE :int = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
SCREAMING_SNAKE_CASE :List[Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
SCREAMING_SNAKE_CASE :Optional[Any] = '''wiki40b'''
SCREAMING_SNAKE_CASE :Any = '''dense'''
SCREAMING_SNAKE_CASE :int = '''beam'''
SCREAMING_SNAKE_CASE :Optional[int] = 2
SCREAMING_SNAKE_CASE :Optional[Any] = 64
SCREAMING_SNAKE_CASE :str = 2_56
SCREAMING_SNAKE_CASE :Any = None
SCREAMING_SNAKE_CASE :List[Any] = None
SCREAMING_SNAKE_CASE :str = st.sidebar.checkbox('''Generation options''')
if generate_options:
SCREAMING_SNAKE_CASE :Optional[Any] = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE :Optional[Any] = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
SCREAMING_SNAKE_CASE :int = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_56, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE :Optional[Any] = st.sidebar.slider(
'''Maximum generation length''', min_value=64, max_value=5_12, value=2_56, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE :Optional[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE :int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE :Tuple = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE :Optional[int] = None
# start main text
SCREAMING_SNAKE_CASE :Optional[int] = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
SCREAMING_SNAKE_CASE :List[Any] = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE :Union[str, Any] = st.text_input('''Enter your question here:''', '''''')
else:
SCREAMING_SNAKE_CASE :Optional[Any] = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE :Dict = make_support(question, source=wiki_source, method='''dense''', n_results=10)
SCREAMING_SNAKE_CASE :Union[str, Any] = make_support(question, source=wiki_source, method='''sparse''', n_results=10)
SCREAMING_SNAKE_CASE :Tuple = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE :Any = support_list[:10]
SCREAMING_SNAKE_CASE :Optional[Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE :Union[str, Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE :str = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE :Any = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
SCREAMING_SNAKE_CASE :List[Any] = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE :Union[str, Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE :str = sec_titles.split(''' & ''')
SCREAMING_SNAKE_CASE :str = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE :Any = find_nearest_training(question)
SCREAMING_SNAKE_CASE :Optional[int] = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
SCREAMING_SNAKE_CASE :Dict = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
SCREAMING_SNAKE_CASE :int = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 159 |
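The app above wires dense retrieval through flat inner-product FAISS indexes. A minimal runnable sketch of that retrieval pattern, assuming `faiss` and `numpy` are installed; the random embeddings stand in for the real passage vectors.

import faiss
import numpy as np

d = 128                                # embedding dimension, as in the app above
passages = np.random.rand(1000, d).astype("float32")
index = faiss.IndexFlatIP(d)           # exact max-inner-product search
index.add(passages)

query = np.random.rand(1, d).astype("float32")
scores, ids = index.search(query, 10)  # top-10 passage ids and their scores
print(ids[0], scores[0])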
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
UpperCAmelCase_ : int = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
UpperCAmelCase_ : Optional[Any] = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
UpperCAmelCase_ : int = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthews correlation coefficient
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return float((preds == labels).mean() )
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Any="binary" ) -> Dict:
"""simple docstring"""
UpperCamelCase :List[str] = simple_accuracy(__magic_name__ , __magic_name__ )
UpperCamelCase :Dict = float(f1_score(y_true=__magic_name__ , y_pred=__magic_name__ , average=__magic_name__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def SCREAMING_SNAKE_CASE_ ( __magic_name__ : List[Any] , __magic_name__ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase :Optional[Any] = {}
for id_pred, label in zip(__magic_name__ , __magic_name__ ):
UpperCamelCase :str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
UpperCamelCase :Union[str, Any] = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
UpperCamelCase :Dict = [(pred, label)]
UpperCamelCase , UpperCamelCase :Optional[int] = [], []
for question, preds_labels in question_map.items():
UpperCamelCase , UpperCamelCase :Optional[Any] = zip(*__magic_name__ )
UpperCamelCase :Optional[int] = f1_score(y_true=__magic_name__ , y_pred=__magic_name__ , average="""macro""" )
fas.append(__magic_name__ )
UpperCamelCase :int = int(sum(pred == label for pred, label in preds_labels ) == len(__magic_name__ ) )
ems.append(__magic_name__ )
UpperCamelCase :Optional[int] = float(sum(__magic_name__ ) / len(__magic_name__ ) )
UpperCamelCase :str = sum(__magic_name__ ) / len(__magic_name__ )
UpperCamelCase :Tuple = float(f1_score(y_true=__magic_name__ , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def _A ( self : str ):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None , )
def _A ( self : Optional[Any] ):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def _A ( self : List[str] , __lowerCamelCase : Tuple , __lowerCamelCase : str ):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__lowerCamelCase , __lowerCamelCase )}
elif self.config_name == "cb":
return acc_and_fa(__lowerCamelCase , __lowerCamelCase , fa_avg="""macro""" )
elif self.config_name == "record":
UpperCamelCase :Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
UpperCamelCase :Tuple = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__lowerCamelCase , __lowerCamelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__lowerCamelCase , __lowerCamelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__lowerCamelCase , __lowerCamelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 38 | 0 |
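The MultiRC branch above groups answer-level predictions by question before scoring. A small self-contained sketch of that aggregation, using toy data and sklearn's `f1_score`:

# Sketch of the MultiRC aggregation: group answer-level predictions by
# question, then compute per-question exact match and macro F1.
from collections import defaultdict
from sklearn.metrics import f1_score

id_preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 1}, "prediction": 1},
]
labels = [1, 0, 1]

question_map = defaultdict(list)
for id_pred, label in zip(id_preds, labels):
    key = (id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
    question_map[key].append((id_pred["prediction"], label))

ems, f1s = [], []
for preds_labels in question_map.values():
    preds, golds = zip(*preds_labels)
    ems.append(int(all(p == g for p, g in preds_labels)))
    f1s.append(f1_score(y_true=golds, y_pred=preds, average="macro"))

print({"exact_match": sum(ems) / len(ems), "f1_m": sum(f1s) / len(f1s)})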
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline | 267 |
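The module above only exports the pipeline when its optional dependencies resolve. A stdlib-only sketch of the same guarded-import pattern; the `is_available` helper is illustrative, not the diffusers API.

import importlib.util

def is_available(module_name):
    # find_spec returns None when the module cannot be imported
    return importlib.util.find_spec(module_name) is not None

if is_available("torch") and is_available("transformers"):
    # from .pipeline_vq_diffusion import VQDiffusionPipeline
    print("torch + transformers found: pipeline can be exported")
else:
    print("optional dependencies missing: pipeline not exported")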
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase = get_tests_dir("""fixtures/test_sentencepiece_no_bos.model""")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = PegasusTokenizer(_UpperCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
return PegasusTokenizer.from_pretrained('''google/pegasus-large''' )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def UpperCamelCase__ ( self ):
snake_case_ = '''</s>'''
snake_case_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''</s>''' )
self.assertEqual(vocab_keys[-1] , '''v''' )
self.assertEqual(len(_UpperCAmelCase ) , 11_03 )
def UpperCamelCase__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 11_03 )
def UpperCamelCase__ ( self ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = (
'''Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'''
''' </s> <pad> <pad> <pad>'''
)
snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
snake_case_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self._large_tokenizer
# <mask_1> masks a whole sentence while <mask_2> masks a single word
snake_case_ = '''<mask_1> To ensure a <mask_2> flow of bank resolutions.'''
snake_case_ = [2, 4_13, 6_15, 1_14, 3, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
snake_case_ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def UpperCamelCase__ ( self ):
snake_case_ = self._large_tokenizer
# The tracebacks for the following asserts are **better** without messages or self.assertEqual
assert tokenizer.vocab_size == 9_61_03
assert tokenizer.pad_token_id == 0
assert tokenizer.eos_token_id == 1
assert tokenizer.offset == 1_03
assert tokenizer.unk_token_id == tokenizer.offset + 2 == 1_05
assert tokenizer.unk_token == "<unk>"
assert tokenizer.model_max_length == 10_24
snake_case_ = '''To ensure a smooth flow of bank resolutions.'''
snake_case_ = [4_13, 6_15, 1_14, 22_91, 19_71, 1_13, 16_79, 1_07_10, 1_07, 1]
snake_case_ = tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = ['''This is going to be way too long.''' * 1_50, '''short example''']
snake_case_ = ['''not super long but more than 5 tokens''', '''tiny''']
snake_case_ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 10_24)
assert batch.attention_mask.shape == (2, 10_24)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
@slow
def UpperCamelCase__ ( self ):
# fmt: off
snake_case_ = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='''google/bigbird-pegasus-large-arxiv''' , revision='''ba85d0851d708441f91440d509690f1ab6353415''' , )
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
__snake_case = PegasusTokenizer
__snake_case = PegasusTokenizerFast
__snake_case = True
__snake_case = True
def UpperCamelCase__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = PegasusTokenizer(_UpperCAmelCase , offset=0 , mask_token_sent=_UpperCAmelCase , mask_token='''[MASK]''' )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
return PegasusTokenizer.from_pretrained('''google/bigbird-pegasus-large-arxiv''' )
def UpperCamelCase__ ( self , **_UpperCAmelCase ):
return PegasusTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCamelCase__ ( self , _UpperCAmelCase ):
return ("This is a test", "This is a test")
def UpperCamelCase__ ( self ):
snake_case_ = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = self.tokenizer_class.from_pretrained(self.tmpdirname )
snake_case_ = (
'''Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'''
''' <pad> <pad> <pad>'''
)
snake_case_ = rust_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
snake_case_ = py_tokenizer([raw_input_str] , return_tensors=_UpperCAmelCase , add_special_tokens=_UpperCAmelCase ).input_ids[0]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_torch
def UpperCamelCase__ ( self ):
snake_case_ = ['''This is going to be way too long.''' * 10_00, '''short example''']
snake_case_ = ['''not super long but more than 5 tokens''', '''tiny''']
snake_case_ = self._large_tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' )
snake_case_ = self._large_tokenizer(
text_target=_UpperCAmelCase , max_length=5 , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , return_tensors='''pt''' )
assert batch.input_ids.shape == (2, 40_96)
assert batch.attention_mask.shape == (2, 40_96)
assert targets["input_ids"].shape == (2, 5)
assert len(_UpperCAmelCase ) == 2 # input_ids, attention_mask.
def UpperCamelCase__ ( self ):
snake_case_ = (
'''This is an example string that is used to test the original TF implementation against the HF'''
''' implementation'''
)
snake_case_ = self._large_tokenizer(_UpperCAmelCase ).input_ids
self.assertListEqual(
_UpperCAmelCase , [1_82, 1_17, 1_42, 5_87, 42_11, 1_20, 1_17, 2_63, 1_12, 8_04, 1_09, 8_56, 2_50_16, 31_37, 4_64, 1_09, 2_69_55, 31_37, 1] , ) | 267 | 1 |
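The assertions above pin down the Pegasus id layout: a reserved block of special-token ids, with regular SentencePiece ids shifted up by `offset`. A rough, purely illustrative sketch of that mapping; the helper is hypothetical and the constants mirror the test values.

OFFSET = 103  # matches `tokenizer.offset` asserted above
SPECIALS = {0: "<pad>", 1: "</s>", 2: "<mask_1>", 3: "<mask_2>"}

def to_sentencepiece_id(token_id):
    """Map a shifted Pegasus id back to the underlying SentencePiece id (sketch)."""
    if token_id < OFFSET:
        raise ValueError("id falls inside the reserved special-token block")
    return token_id - OFFSET

assert to_sentencepiece_id(413) == 310  # arithmetic only; 413 is an id from the test above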
from __future__ import annotations
def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> list:
lowercase : Any = []
lowercase , lowercase : List[str] = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
lowercase : Optional[Any] = result + left + right
return input_list
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> list:
if len(SCREAMING_SNAKE_CASE__ ) <= 1:
return input_list
lowercase : Dict = list(SCREAMING_SNAKE_CASE__ )
# iteration for two-way merging
lowercase : Optional[int] = 2
while p <= len(SCREAMING_SNAKE_CASE__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ):
lowercase : List[Any] = i
lowercase : Optional[int] = i + p - 1
lowercase : Optional[Any] = (low + high + 1) // 2
lowercase : Optional[Any] = merge(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# final merge of last two parts
if p * 2 >= len(SCREAMING_SNAKE_CASE__ ):
lowercase : Tuple = i
lowercase : Any = merge(SCREAMING_SNAKE_CASE__ , 0 , SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowercase : List[Any] = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowercase : List[str] = []
else:
lowercase : Tuple = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 20 |
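The snippet above is an iterative (bottom-up) merge sort written in the dataset's obfuscated style. For reference, a clean runnable version of the same idea:

def merge(items, low, mid, high):
    """Merge the sorted runs items[low:mid+1] and items[mid+1:high+1] in place."""
    left, right = items[low : mid + 1], items[mid + 1 : high + 1]
    merged = []
    while left and right:
        merged.append((left if left[0] <= right[0] else right).pop(0))
    items[low : high + 1] = merged + left + right

def iter_merge_sort(items):
    """Bottom-up merge sort: merge runs of width 1, 2, 4, ... until sorted."""
    items = list(items)
    width = 1
    while width < len(items):
        for low in range(0, len(items), 2 * width):
            mid = min(low + width - 1, len(items) - 1)
            high = min(low + 2 * width - 1, len(items) - 1)
            merge(items, low, mid, high)
        width *= 2
    return items

assert iter_merge_sort([5, 1, 4, 2, 8, 0]) == [0, 1, 2, 4, 5, 8]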
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __snake_case ( lowerCAmelCase ):
def __init__( self ,**snake_case ):
'''simple docstring'''
super().__init__(**snake_case )
if self.framework != "pt":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
# No specific FOR_XXX available yet
def __call__( self ,snake_case ,**snake_case ):
'''simple docstring'''
return super().__call__(snake_case ,**snake_case )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
lowercase : Union[str, Any] = {}
if "candidate_labels" in kwargs:
lowercase : List[str] = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
lowercase : Dict = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case=None ,snake_case="This is a sound of {}." ):
'''simple docstring'''
if isinstance(snake_case ,snake_case ):
if audio.startswith("""http://""" ) or audio.startswith("""https://""" ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
lowercase : Optional[Any] = requests.get(snake_case ).content
else:
with open(snake_case ,"""rb""" ) as f:
lowercase : Union[str, Any] = f.read()
if isinstance(snake_case ,snake_case ):
lowercase : int = ffmpeg_read(snake_case ,self.feature_extractor.sampling_rate )
if not isinstance(snake_case ,np.ndarray ):
raise ValueError("""We expect a numpy ndarray as input""" )
if len(audio.shape ) != 1:
raise ValueError("""We expect a single channel audio input for ZeroShotAudioClassificationPipeline""" )
lowercase : Dict = self.feature_extractor(
[audio] ,sampling_rate=self.feature_extractor.sampling_rate ,return_tensors="""pt""" )
lowercase : Tuple = candidate_labels
lowercase : Tuple = [hypothesis_template.format(snake_case ) for x in candidate_labels]
lowercase : Optional[Any] = self.tokenizer(snake_case ,return_tensors=self.framework ,padding=snake_case )
lowercase : Optional[Any] = [text_inputs]
return inputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[str] = model_inputs.pop("""candidate_labels""" )
lowercase : Dict = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,snake_case ):
lowercase : List[Any] = text_inputs[0]
else:
# Batching case.
lowercase : Dict = text_inputs[0][0]
lowercase : Optional[Any] = self.model(**snake_case ,**snake_case )
lowercase : Any = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_audio,
}
return model_outputs
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : List[Any] = model_outputs.pop("""candidate_labels""" )
lowercase : Any = model_outputs["""logits"""][0]
if self.framework == "pt":
lowercase : Any = logits.softmax(dim=0 )
lowercase : Tuple = probs.tolist()
else:
raise ValueError("""`tf` framework not supported.""" )
lowercase : Tuple = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(snake_case ,snake_case ) ,key=lambda x : -x[0] )
]
return result
| 20 | 1 |
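The pipeline's postprocess step above softmaxes the per-label logits and sorts the candidates by score. A dependency-free sketch of just that step; the function name is illustrative.

import math

def rank_labels(logits, labels):
    exps = [math.exp(l - max(logits)) for l in logits]  # numerically stable softmax
    total = sum(exps)
    probs = [e / total for e in exps]
    return [
        {"score": p, "label": lab}
        for p, lab in sorted(zip(probs, labels), key=lambda pair: -pair[0])
    ]

print(rank_labels([2.0, 0.5, -1.0], ["dog barking", "rain", "speech"]))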
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] ):
lowerCAmelCase = [x.strip() for x in open(_UpperCAmelCase ).readlines()]
lowerCAmelCase = [x.strip() for x in open(_UpperCAmelCase ).readlines()][: len(_UpperCAmelCase )]
lowerCAmelCase = calculate_rouge(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
if save_path is not None:
save_json(_UpperCAmelCase , _UpperCAmelCase , indent=_UpperCAmelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 309 |
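The script above pairs hypothesis and reference files line by line, truncates the references to the hypothesis count, scores them, and optionally dumps JSON. A sketch of that driver with the scorer injected, since `calculate_rouge` comes from the project's local `utils` and its exact signature is not shown here:

import json

def evaluate_files(pred_path, ref_path, score_fn, save_path=None):
    # Mirror the line-pairing above; `score_fn` stands in for `calculate_rouge`.
    preds = [line.strip() for line in open(pred_path)]
    refs = [line.strip() for line in open(ref_path)][: len(preds)]  # align lengths
    metrics = score_fn(preds, refs)
    if save_path is not None:
        with open(save_path, "w") as f:
            json.dump(metrics, f, indent=2)
    return metrics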
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Any = {
'''configuration_layoutlmv2''': ['''LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LayoutLMv2Config'''],
'''processing_layoutlmv2''': ['''LayoutLMv2Processor'''],
'''tokenization_layoutlmv2''': ['''LayoutLMv2Tokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Tuple = ['''LayoutLMv2TokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Optional[int] = ['''LayoutLMv2FeatureExtractor''']
__UpperCamelCase : Optional[int] = ['''LayoutLMv2ImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LayoutLMv2ForQuestionAnswering''',
'''LayoutLMv2ForSequenceClassification''',
'''LayoutLMv2ForTokenClassification''',
'''LayoutLMv2Layer''',
'''LayoutLMv2Model''',
'''LayoutLMv2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
from .processing_layoutlmv2 import LayoutLMv2Processor
from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_layoutlmv2 import (
LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
LayoutLMv2ForQuestionAnswering,
LayoutLMv2ForSequenceClassification,
LayoutLMv2ForTokenClassification,
LayoutLMv2Layer,
LayoutLMv2Model,
LayoutLMv2PreTrainedModel,
)
else:
import sys
__UpperCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 309 | 1 |
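The module above defers all heavy imports via `_LazyModule`. A simplified sketch of the underlying idea using PEP 562's module-level `__getattr__`; this is a stand-in illustrating the pattern, not the transformers implementation, and assumes it lives in a package's `__init__.py`.

import importlib

_LAZY_ATTRS = {"LayoutLMv2Config": ".configuration_layoutlmv2"}

def __getattr__(name):
    # Import the owning submodule only when one of its names is first accessed.
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")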
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def __lowercase ( a__ ) -> Tuple:
if isinstance(a__ , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
def _A ( self , _A , _A ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
pass
def _A ( self ):
'''simple docstring'''
pass
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(_A )
__SCREAMING_SNAKE_CASE = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(_A , _A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__SCREAMING_SNAKE_CASE = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(_A , _A )
__SCREAMING_SNAKE_CASE = {'vision_model': vision_model, 'text_model': text_model}
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__SCREAMING_SNAKE_CASE = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(_A , _A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__SCREAMING_SNAKE_CASE = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__SCREAMING_SNAKE_CASE = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__SCREAMING_SNAKE_CASE = after_output[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-5 )
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(_A , _A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__SCREAMING_SNAKE_CASE = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__SCREAMING_SNAKE_CASE = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.image_size )
__SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.patch_size )
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__SCREAMING_SNAKE_CASE = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__SCREAMING_SNAKE_CASE = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _A ( self , _A , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , f"""Difference between torch and flax is {diff} (>= {tol}).""" )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@slow
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_pretrained_model_and_inputs()
__SCREAMING_SNAKE_CASE = model_a(**_A )
__SCREAMING_SNAKE_CASE = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained(_A )
__SCREAMING_SNAKE_CASE = model_a(**_A )
__SCREAMING_SNAKE_CASE = after_outputs[0].numpy()
__SCREAMING_SNAKE_CASE = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1e-5 )
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFViTModel(_A , name='vision_model' )
__SCREAMING_SNAKE_CASE = TFBertModel(_A , name='text_model' )
return vision_model, text_model
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFViTModelTester(self )
__SCREAMING_SNAKE_CASE = TFBertModelTester(self )
__SCREAMING_SNAKE_CASE = vit_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _A ( self , _A , _A , _A , _A , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.get_vision_text_model(_A , _A )
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__SCREAMING_SNAKE_CASE = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__SCREAMING_SNAKE_CASE = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.image_size )
__SCREAMING_SNAKE_CASE = to_atuple(vision_model.config.patch_size )
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__SCREAMING_SNAKE_CASE = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__SCREAMING_SNAKE_CASE = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTModel(_A , name='vision_model' )
__SCREAMING_SNAKE_CASE = TFRobertaModel(_A , name='text_model' )
return vision_model, text_model
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFDeiTModelTester(self )
__SCREAMING_SNAKE_CASE = TFRobertaModelTester(self )
__SCREAMING_SNAKE_CASE = vit_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class UpperCAmelCase_ ( UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__SCREAMING_SNAKE_CASE = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__SCREAMING_SNAKE_CASE = random_attention_mask([batch_size, 4] )
__SCREAMING_SNAKE_CASE = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def _A ( self , _A , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFCLIPVisionModel(_A , name='vision_model' )
__SCREAMING_SNAKE_CASE = TFBertModel(_A , name='text_model' )
return vision_model, text_model
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFCLIPVisionModelTester(self )
__SCREAMING_SNAKE_CASE = TFBertModelTester(self )
__SCREAMING_SNAKE_CASE = clip_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE = bert_model_tester.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = vision_config_and_inputs
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_A )
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
__SCREAMING_SNAKE_CASE = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__SCREAMING_SNAKE_CASE = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_A , padding=_A , return_tensors='np' )
__SCREAMING_SNAKE_CASE = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__SCREAMING_SNAKE_CASE = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1e-3 ) )
| 257 |
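Several tests above reload a saved model and require the max absolute difference between outputs to stay below 1e-5. That check, isolated as a small numpy helper with an illustrative name:

import numpy as np

def assert_close(out_before, out_after, tol=1e-5):
    # Max absolute elementwise difference, as in the save/reload tests above.
    diff = np.amax(np.abs(out_before - out_after))
    assert diff <= tol, f"outputs diverged after reload: {diff} > {tol}"

a = np.array([0.1, 0.2, 0.3])
assert_close(a, a + 5e-6)  # passes: difference is within tolerance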
from typing import Optional

from torch import nn

from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    '''simple docstring'''

    def __init__( self , num_attention_heads = 16 , attention_head_dim = 88 , in_channels = None , num_layers = 1 , dropout = 0.0 , norm_num_groups = 32 , cross_attention_dim = None , attention_bias = False , sample_size = None , num_vector_embeds = None , activation_fn = "geglu" , num_embeds_ada_norm = None , ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict = True , ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states )
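
# A minimal numeric sketch of the residual-mixing rule in `forward` above
# (standalone tensors, not the module itself): each transformer contributes a
# residual, the residuals are blended with `mix_ratio`, and the input is added back.
import torch

residual_0 = torch.ones(1, 4)   # stands in for encoded_state - input_states
residual_1 = torch.zeros(1, 4)
input_states = torch.full((1, 4), 2.0)
mix_ratio = 0.5
print(residual_0 * mix_ratio + residual_1 * (1 - mix_ratio) + input_states)
# tensor([[2.5000, 2.5000, 2.5000, 2.5000]])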
| 257 | 1 |
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    """simple docstring"""

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json")

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor")
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    """simple docstring"""

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map, {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"}, )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True)
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 356 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """simple docstring"""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, "char")
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, "bpe")
        wp_strs, wp_scores = self._decode_helper(wp_preds, "wp")

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1  # char eos token
            eos_str = "[s]"
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2  # bpe eos token
            eos_str = "#"
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102  # wordpiece eos token
            eos_str = "[SEP]"
        else:
            raise ValueError(f"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
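
# A minimal sketch of the per-sample selection rule in `batch_decode` above:
# each of the three decoder heads proposes a (string, confidence) pair and the
# highest-confidence proposal wins. The values below are made up for illustration.
candidates = [("ticket", 0.91), ("ticke#", 0.55), ("ticket", 0.87)]  # char / bpe / wp
scores = [score for _, score in candidates]
print(candidates[scores.index(max(scores))])  # ('ticket', 0.91)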
| 226 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
    "configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
        "ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ErnieForCausalLM",
        "ErnieForMaskedLM",
        "ErnieForMultipleChoice",
        "ErnieForNextSentencePrediction",
        "ErnieForPreTraining",
        "ErnieForQuestionAnswering",
        "ErnieForSequenceClassification",
        "ErnieForTokenClassification",
        "ErnieModel",
        "ErniePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 19 |
import math


def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
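    # A quick sanity check of the hybrid introsort above (quicksort + heapsort +
    # insertion sort), using the module's own `sort` entry point on a fixed list.
    example = [4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12]
    print(sort(example))  # [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]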
| 19 | 1 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> tuple[complex, complex]:
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
snake_case_ = b * b - 4 * a * c
snake_case_ = (-b + sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
snake_case_ = (-b - sqrt(_SCREAMING_SNAKE_CASE )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Any:
snake_case_ , snake_case_ = quadratic_roots(a=5 , b=6 , c=1 )
print(f"""The solutions are: {solutiona} and {solutiona}""" )
if __name__ == "__main__":
main()
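    # Worked example (coefficients chosen for illustration): for x**2 - 3x + 2,
    # the discriminant is 9 - 8 = 1, so the roots are (3 + 1) / 2 = 2 and (3 - 1) / 2 = 1.
    print(quadratic_roots(a=1, b=-3, c=2))  # (2.0, 1.0)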
| 360 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 233 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True, ):
        """simple docstring"""
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        """simple docstring"""
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        """simple docstring"""
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        """simple docstring"""
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        """simple docstring"""
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        """simple docstring"""
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        """simple docstring"""
        pass
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , Image.Image )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , lowerCAmelCase_ )
self.assertIsInstance(encoding.boxes , lowerCAmelCase_ )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , numpify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , np.ndarray )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase_ , torchify=lowerCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase_ , torch.Tensor )
# Test not batched input
_snake_case = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
_snake_case = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
_snake_case = Image.open(ds[0]['file'] ).convert('RGB' )
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_snake_case = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 
6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowerCAmelCase_ )
self.assertListEqual(encoding.boxes , lowerCAmelCase_ )
# with apply_OCR = False
_snake_case = LayoutLMvaImageProcessor(apply_ocr=lowerCAmelCase_ )
_snake_case = image_processing(lowerCAmelCase_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 42 |
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
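    # Worked arithmetic for the point mapping above: 128 + level + (c - 128)
    # simplifies to c + level, so with level=100 a pixel of value 50 maps to 150;
    # results outside 0-255 are coerced back into the 8-bit range by PIL.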
| 265 | 0 |
from __future__ import annotations

import numpy as np
from numpy import float64
from numpy.typing import NDArray


def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int, ):
    '''simple docstring'''
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape

    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)

    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)

    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)

    if len(init_val) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)

    if iterations <= 0:
        raise ValueError("Iterations must be at least 1")

    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix), axis=1)
    rows, cols = table.shape

    strictly_diagonally_dominant(table)

    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]

            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val

    return [float(i) for i in new_val]


def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    '''simple docstring'''
    rows, cols = table.shape

    is_diagonally_dominant = True

    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]

        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant")

    return is_diagonally_dominant


# Test Cases
if __name__ == "__main__":
    import doctest

    doctest.testmod()
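    # A small usage sketch (numbers chosen for illustration): iterate on the
    # strictly diagonally dominant system 4x + y = 2, x + 3y = -6.
    coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
    constant = np.array([[2.0], [-6.0]])
    print(jacobi_iteration_method(coefficient, constant, init_val=[0.5, -0.5], iterations=3))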
| 159 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    '''simple docstring'''
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    '''simple docstring'''
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
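    # Worked size example for the valid convolution above: a 512x512 grayscale
    # image with k_size=3 yields a (512 - 3 + 1) x (512 - 3 + 1) = 510x510
    # output, since no padding is applied before the im2col step.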
| 159 | 1 |
def binomial_coefficient(n, r):
    '''simple docstring'''
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
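
# Sanity checks via Pascal's rule, which the row update above implements:
# C(10, 5) = 252 (printed above), C(4, 2) = 6, C(5, 0) = 1.
assert binomial_coefficient(n=4, r=2) == 6
assert binomial_coefficient(n=5, r=0) == 1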
| 222 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__( self , vocab_size=50_432 , hidden_size=6_144 , num_hidden_layers=44 , num_attention_heads=64 , intermediate_size=24_576 , hidden_act="gelu" , rotary_pct=0.25 , rotary_emb_base=10_000 , attention_dropout=0.0 , hidden_dropout=0.0 , classifier_dropout=0.1 , max_position_embeddings=2_048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=0 , eos_token_id=2 , tie_word_embeddings=False , use_parallel_residual=True , rope_scaling=None , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisble by the number of attention heads! Make sure to update them!' )

    def _rope_scaling_validation( self ):
        """simple docstring"""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
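
# A minimal usage sketch of the rope_scaling validation above; "type" and
# "factor" are the exact field names this config checks for.
config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
assert config.rope_scaling["factor"] == 2.0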
| 222 | 1 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")


@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    key: KEY
    val: VAL


class _DeletedItem(_Item):
    def __init__(self):
        super().__init__(None, None)

    def __bool__(self):
        return False


_deleted = _DeletedItem()


class HashMap(MutableMapping[KEY, VAL]):
    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75):
        self._initial_block_size = initial_block_size
        self._buckets = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        stored = self._buckets[ind]
        if not stored:
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY) -> Iterator[int]:
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()

        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = " ,".join(
            f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
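
# A short usage sketch of the open-addressing map above (linear probing, with
# tombstones marking deletions); it behaves like a regular mutable mapping.
hm = HashMap()
hm["a"] = 1
hm["b"] = 2
del hm["a"]
print(len(hm), hm["b"])  # 1 2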
| 368 |
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE :int = 'examples/'
SCREAMING_SNAKE_CASE :str = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
SCREAMING_SNAKE_CASE :int = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
SCREAMING_SNAKE_CASE :List[str] = 'README.md'
def UpperCAmelCase ( a_ , a_ , a_ ) -> Tuple:
"""simple docstring"""
with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__A = f.read()
__A , __A = REPLACE_PATTERNS[pattern]
__A = replace.replace("VERSION" , a_ )
__A = re_pattern.sub(a_ , a_ )
with open(a_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(a_ )
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(a_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(a_ , a_ ) , a_ , pattern="examples" )
def UpperCAmelCase ( a_ , a_=False ) -> str:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(a_ , a_ , a_ )
if not patch:
update_version_in_examples(a_ )
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__A = "🤗 Transformers currently provides the following architectures"
__A = "1. Want to contribute a new model?"
with open(a_ , "r" , encoding="utf-8" , newline="\n" ) as f:
__A = f.readlines()
# Find the start of the list.
__A = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__A = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
__A = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(a_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(a_ )
def UpperCAmelCase ( ) -> Dict:
"""simple docstring"""
with open(REPLACE_FILES["init"] , "r" ) as f:
__A = f.read()
__A = REPLACE_PATTERNS["init"][0].search(a_ ).groups()[0]
return packaging.version.parse(a_ )
def UpperCAmelCase ( a_=False ) -> str:
"""simple docstring"""
__A = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
__A = default_version.base_version
elif patch:
__A = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
__A = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
__A = input(F'''Which version are you releasing? [{default_version}]''' )
if len(a_ ) == 0:
__A = default_version
print(F'''Updating version to {version}.''' )
global_version_update(a_ , patch=a_ )
def UpperCAmelCase ( ) -> Optional[int]:
"""simple docstring"""
__A = get_version()
__A = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
__A = current_version.base_version
# Check with the user we got that right.
__A = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(a_ ) == 0:
__A = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(a_ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE :Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
SCREAMING_SNAKE_CASE :List[str] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 124 | 0 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria(ABC):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all.")
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None
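
# A short usage sketch of the criteria above: a generation loop calls the
# list once per step and stops as soon as any criterion fires.
criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=8)])
fake_input_ids = torch.zeros((1, 8), dtype=torch.long)  # pretend 8 tokens exist
print(criteria(fake_input_ids, scores=None))  # True: the length budget is used up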

def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
| 233 |
from itertools import permutations


def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num))


if __name__ == "__main__":
    print(f"{solution() = }")
| 233 | 1 |
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowerCamelCase( yaml.SafeLoader ):
def UpperCamelCase ( self, lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : List[str] = [self.constructed_objects[key_node] for key_node, _ in node.value]
_lowercase : Dict = [tuple(lowerCamelCase) if isinstance(lowerCamelCase, lowerCamelCase) else key for key in keys]
_lowercase : Optional[Any] = Counter(lowerCamelCase)
_lowercase : Tuple = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''')
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=False) -> Any:
"""simple docstring"""
_lowercase : str = super().construct_mapping(lowerCamelCase, deep=lowerCamelCase)
self._check_no_duplicates_on_constructed_node(lowerCamelCase)
return mapping
def UpperCamelCase_( lowerCamelCase_ ) -> Tuple[Optional[str], str]:
_lowercase : List[Any] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_lowercase : List[Any] = full_content[1:].index('---' ) + 1
_lowercase : Any = '\n'.join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(lowerCamelCase_ )
class _lowerCamelCase( _a ):
# class attributes
lowercase_ : int = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> "DatasetMetadata":
"""simple docstring"""
with open(lowerCamelCase, encoding='utf-8') as readme_file:
_lowercase : int = _split_yaml_from_readme(readme_file.read())
if yaml_string is not None:
return cls.from_yaml_string(lowerCamelCase)
else:
return cls()
def UpperCamelCase ( self, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if path.exists():
with open(lowerCamelCase, encoding='utf-8') as readme_file:
_lowercase : Tuple = readme_file.read()
else:
_lowercase : List[Any] = None
_lowercase : Union[str, Any] = self._to_readme(lowerCamelCase)
with open(lowerCamelCase, 'w', encoding='utf-8') as readme_file:
readme_file.write(lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase = None) -> str:
"""simple docstring"""
if readme_content is not None:
_lowercase : Union[str, Any] = _split_yaml_from_readme(lowerCamelCase)
_lowercase : int = '---\n' + self.to_yaml_string() + '---\n' + content
else:
_lowercase : Tuple = '---\n' + self.to_yaml_string() + '---\n'
return full_content
@classmethod
def UpperCamelCase ( cls, lowerCamelCase) -> "DatasetMetadata":
"""simple docstring"""
_lowercase : Any = yaml.load(lowerCamelCase, Loader=_NoDuplicateSafeLoader) or {}
# Convert the YAML keys to DatasetMetadata fields
_lowercase : Optional[int] = {
(key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**lowerCamelCase)
def UpperCamelCase ( self) -> str:
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
}, sort_keys=lowerCamelCase, allow_unicode=lowerCamelCase, encoding='utf-8', ).decode('utf-8')
SCREAMING_SNAKE_CASE : Any = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
SCREAMING_SNAKE_CASE : Any = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
SCREAMING_SNAKE_CASE : List[str] = ap.parse_args()
SCREAMING_SNAKE_CASE : List[Any] = Path(args.readme_filepath)
SCREAMING_SNAKE_CASE : Any = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 364 |
def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
| 84 | 0 |
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    '''simple docstring'''
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        """simple docstring"""
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)
    def check_inputs(self, strength):
        """simple docstring"""
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
def lowerCamelCase_ ( self : List[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = min(int(num_inference_steps * strength ) , lowerCamelCase_ )
UpperCamelCase = max(num_inference_steps - init_timestep , 0 )
UpperCamelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
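    # Worked example (illustrative): with num_inference_steps=50 and strength=0.8,
    # init_timestep = min(int(50 * 0.8), 50) = 40 and t_start = 10, so the denoising
    # loop runs over the last 40 scheduler timesteps, i.e. the image is noised to
    # 80% of the schedule and then denoised back.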
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        # 1. Check inputs. Raise error if not correct
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. Set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in the paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
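# Usage sketch (illustrative; the checkpoint id is an assumption, any unconditional
# DDPM/DDIM checkpoint exposing a `unet` and a `scheduler` should work):
#   pipe = DDIMNoiseComparativeAnalysisPipeline.from_pretrained("google/ddpm-celebahq-256")
#   images, timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)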
| 343 |
def topological_sort(graph):
    """Kahn's algorithm: topologically sort a DAG given as an adjacency list."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
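
# An equivalent sketch using collections.deque, which makes the queue pops O(1)
# instead of list.pop(0)'s O(n); the traversal order is identical (illustrative
# addition, not part of the original module).
from collections import deque


def topological_sort_deque(graph):
    indegree = {u: 0 for u in graph}
    for values in graph.values():
        for v in values:
            indegree[v] += 1
    queue = deque(u for u, d in indegree.items() if d == 0)
    topo = []
    while queue:
        vertex = queue.popleft()
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    return topo if len(topo) == len(graph) else None  # None signals a cycle


print(topological_sort_deque(graph))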
| 345 | 0 |
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.txt",
    "merges_file": "bpe.codes",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt",
    },
    "merges_file": {
        "vinai/phobert-base": "https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes",
        "vinai/phobert-large": "https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
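# Example (illustrative): get_pairs(("l", "o", "w</w>")) == {("l", "o"), ("o", "w</w>")}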
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self, vocab_file, merges_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", **kwargs,
    ):
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs,
        )
        self.vocab_file = vocab_file
        self.merges_file = merges_file
        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3
        self.add_from_file(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)
        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return
        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
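
# Usage sketch (illustrative; the checkpoint id comes from the maps above and
# loading it needs network access, so this is indicative only):
#   tokenizer = PhobertTokenizer.from_pretrained("vinai/phobert-base")
#   tokenizer.tokenize("xin chào")  # BPE pieces; non-final pieces end in "@@"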
| 314 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name):
    config = SwinConfig(image_size=192)
    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config


def rename_key(name):
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"
    if "decoder" in name:
        pass
    else:
        name = "swin." + name
    return name
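# Example of the renaming above (illustrative):
#   rename_key("encoder.layers.0.blocks.0.attn.proj.weight")
#   -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"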
def convert_state_dict(orig_state_dict, model):
    # NOTE: the destination key strings were lost in extraction; they are
    # reconstructed from the standard Swin query/key/value split pattern.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)  # keep the full output so .keys() below works
    print(outputs.keys())
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 314 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in extraction; a generic name is used here.
    model_input_names = ["pixel_values"]

    def __init__(
        self, do_resize: bool = True, size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR, do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None, do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None, **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self, images: ImageInput, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None,
        resample: Optional[PILImageResampling] = None, do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None, do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
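
# Usage sketch (illustrative; the class name above is a reconstruction):
#   processor = SimpleImageProcessor()
#   batch = processor(images=pil_image, return_tensors="np")
#   batch["pixel_values"].shape  # -> (1, 3, 224, 224) after resize + center crop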
| 77 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetY32gf, RegNetY64gf, RegNetY128gf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # only record leaf modules (no submodules), convolutions and batch norms
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
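# Tracker usage sketch (illustrative): record which leaf modules an input
# actually flows through during one forward pass, e.g.
#   traced = Tracker(some_model)(torch.randn((1, 3, 224, 224))).parametrized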
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 1
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    raise_if_mismatch: bool = True

    def __call__(self, x: Tensor):
        """Transfer the weights of self.src to self.dest by running one forward pass with x."""
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced) and self.raise_if_mismatch:
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")
class FakeRegNetVisslWrapper(nn.Module):
    """Wraps a classy-vision RegNet trunk so that it exposes vissl-style feature blocks."""

    def __init__(self, model: nn.Module):
        super().__init__()
        feature_blocks: List[Tuple[str, nn.Module]] = []
        # - get the stem
        feature_blocks.append(("conv1", model.stem))
        # - get all the feature blocks
        for k, v in model.trunk_output.named_children():
            assert k.startswith("block"), f"Unexpected layer name {k}"
            block_index = len(feature_blocks) + 1
            feature_blocks.append((f"res{block_index}", v))
        self._feature_blocks = nn.ModuleDict(feature_blocks)

    def forward(self, x: Tensor):
        return get_trunk_forward_outputs(
            x,
            out_feat_keys=None,
            feature_blocks=self._feature_blocks,
        )


class NameToFromModelFuncMap(dict):
    """Maps a model name to a factory function, defaulting to timm for unknown names."""

    def convert_name_to_timm(self, x: str) -> str:
        x_split = x.split("-")
        return x_split[0] + x_split[1] + "_" + "".join(x_split[2:])

    def __getitem__(self, x: str) -> Callable[[], Tuple[nn.Module, Dict]]:
        # default to timm!
        if x not in self:
            x = self.convert_name_to_timm(x)
            val = partial(lambda: (timm.create_model(x, pretrained=True).eval(), None))
        else:
            val = super().__getitem__(x)
        return val


class NameToOurModelFuncMap(dict):
    """Maps a model name to the matching Transformers model class."""

    def __getitem__(self, x: str) -> Callable[[], nn.Module]:
        if "seer" in x and "in1k" not in x:
            val = RegNetModel
        else:
            val = RegNetForImageClassification
        return val
def manually_copy_vissl_head(from_state_dict, to_state_dict, keys):
    for from_key, to_key in keys:
        to_state_dict[to_key] = from_state_dict[from_key].clone()
        print(f"Copied key={from_key} to={to_key}")
    return to_state_dict
def convert_weight_and_push(name, from_model_func, our_model_func, config, save_directory, push_to_hub=True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model, from_state_dict = from_model_func()
        our_model = our_model_func(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model, raise_if_mismatch=False)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)
    if from_state_dict is not None:
        keys = []
        # for seer - in1k finetuned we have to manually copy the head
        if "seer" in name and "in1k" in name:
            keys = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
        to_state_dict = manually_copy_vissl_head(from_state_dict, our_model.state_dict(), keys)
        our_model.load_state_dict(to_state_dict)
    our_outputs = our_model(x, output_hidden_states=True)
    our_output = (
        our_outputs.logits if isinstance(our_model, RegNetForImageClassification) else our_outputs.last_hidden_state
    )
    from_output = from_model(x)
    from_output = from_output[-1] if type(from_output) is list else from_output
    # the vissl seer models don't have a head, so compare the last hidden state instead
    if "seer" in name and "in1k" in name:
        our_output = our_outputs.hidden_states[-1]
    assert torch.allclose(from_output, our_output), "The model logits don't match the original one."
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add model",
            use_temp_dir=True,
        )
        size = 224 if "seer" not in name else 384
        # we can use the convnext image processor here
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k", size=size)
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        print(f"Pushed {name}")
def convert_weights_and_push(save_directory, model_name=None, push_to_hub=True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(RegNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_config = {
'''regnet-x-002''': ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type='''x''' ),
'''regnet-x-004''': ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type='''x''' ),
'''regnet-x-016''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type='''x''' ),
'''regnet-x-032''': ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type='''x''' ),
'''regnet-x-040''': ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type='''x''' ),
'''regnet-x-064''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type='''x''' ),
'''regnet-x-080''': ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type='''x''' ),
'''regnet-x-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type='''x''' ),
'''regnet-x-160''': ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type='''x''' ),
'''regnet-x-320''': ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type='''x''' ),
# y variant
'''regnet-y-002''': ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
'''regnet-y-004''': ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
'''regnet-y-006''': ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
'''regnet-y-008''': ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
'''regnet-y-016''': ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
'''regnet-y-032''': ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ),
'''regnet-y-040''': ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ),
'''regnet-y-064''': ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ),
'''regnet-y-080''': ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ),
'''regnet-y-120''': ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ),
'''regnet-y-160''': ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ),
'''regnet-y-320''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
'''regnet-y-320-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer''': RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer''': RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer''': RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
# finetuned on imagenet
'''regnet-y-320-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ),
'''regnet-y-640-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ),
'''regnet-y-1280-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ),
'''regnet-y-2560-seer-in1k''': ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ),
'''regnet-y-10b-seer-in1k''': ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ),
}
    names_to_ours_model_map = NameToOurModelFuncMap()
    names_to_from_model_map = NameToFromModelFuncMap()
    # add seer weights logic

    def load_using_classy_vision(checkpoint_url: str, model_func: Callable[[], nn.Module]) -> Tuple[nn.Module, Dict]:
        files = torch.hub.load_state_dict_from_url(checkpoint_url, model_dir=str(save_directory), map_location="cpu")
        model = model_func()
        # check if we have a head, if yes add it
        model_state_dict = files["classy_state_dict"]["base_model"]["model"]
        state_dict = model_state_dict["trunk"]
        model.load_state_dict(state_dict)
        return model.eval(), model_state_dict["heads"]

    # pretrained (dict keys reconstructed to match the config names above)
    names_to_from_model_map["regnet-y-320-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )
    # IN1K finetuned
    names_to_from_model_map["regnet-y-320-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY32gf()),
    )
    names_to_from_model_map["regnet-y-640-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY64gf()),
    )
    names_to_from_model_map["regnet-y-1280-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch",
        lambda: FakeRegNetVisslWrapper(RegNetY128gf()),
    )
    names_to_from_model_map["regnet-y-10b-seer-in1k"] = partial(
        load_using_classy_vision,
        "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch",
        lambda: FakeRegNetVisslWrapper(RegNet(RegNetParams(depth=27, group_width=1010, w_0=1744, w_a=620.83, w_m=2.52))),
    )
    if model_name:
        convert_weight_and_push(
            model_name,
            names_to_from_model_map[model_name],
            names_to_ours_model_map[model_name],
            names_to_config[model_name],
            save_directory,
            push_to_hub,
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(
                model_name,
                names_to_from_model_map[model_name],
                names_to_ours_model_map[model_name],
                config,
                save_directory,
                push_to_hub,
            )
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported regnet* architecture,'''
''' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 131 | 0 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None

INTERMEDIATE_SIZE_MAP = {
    "7B": 11008,
    "13B": 13824,
    "30B": 17920,
    "65B": 22016,
    "70B": 28672,
}
NUM_SHARDS = {
    "7B": 1,
    "7Bf": 1,
    "13B": 2,
    "13Bf": 2,
    "30B": 4,
    "65B": 8,
    "70B": 8,
    "70Bf": 8,
}


def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
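
# Sanity check against the table above (illustrative): for the 7B hidden dim
# n = 4096, int(8 * 4096 / 3) = 10922, which rounded up to a multiple of 256
# gives compute_intermediate_size(4096) == 11008.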
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)
    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim

    # permute for sliced rotary
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
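    # Illustration (toy sizes, added for clarity): with n_heads=1 and dim=4,
    # permute reorders the rows [0, 1, 2, 3] -> [0, 2, 1, 3], converting the
    # checkpoint's interleaved rotary layout into the half-split layout the
    # Transformers LLaMA implementation expects.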
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )
        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)
    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()
    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path
    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
| 14 |
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
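
# These tests exercise datasets' packaged Csv builder directly; an illustrative
# invocation would be `pytest path/to/this_file.py -q` (the file path is assumed).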
| 14 | 1 |
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
| 261 | """simple docstring"""
def solution(n=1000):
    """Return the sum of all multiples of 3 or 5 below n (Project Euler problem 1)."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        # (a % 15 == 0 is already covered by a % 3 == 0, so no separate branch is needed)
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 261 | 1 |
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """
    Transform a snake_case string to camelCase (or PascalCase if use_pascal is True).

    >>> snake_to_camel_case("some_random_string")
    'someRandomString'
    >>> snake_to_camel_case("some_random_string", use_pascal=True)
    'SomeRandomString'
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 353 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/beit-base-patch16-224-pt22k': (
'https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self, vocab_size=8192, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0,
        initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3,
        use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False,
        use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1,
        use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True,
        auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False,
        semantic_loss_ignore_index=255, **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
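
# Illustrative: BeitConfig() reproduces the base defaults above, e.g.
#   BeitConfig().hidden_size == 768 and BeitConfig().out_indices == [3, 5, 7, 11]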
| 186 | 0 |
"""simple docstring"""
def encrypt(input_string: str, key: int) -> str:
    """Shuffle the characters of input_string into a zigzag of `key` rows (rail-fence cipher)."""
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1 or len(input_string) <= key:
        return input_string
    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)
    return output_string


def decrypt(input_string: str, key: int) -> str:
    """Invert encrypt by rebuilding the zigzag template and reading it back."""
    grid = []
    lowest = key - 1
    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string
    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")
    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(splice)
    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string


def bruteforce(input_string: str) -> dict[int, str]:
    """Use decrypt with every possible key and collect the candidates."""
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
import doctest
doctest.testmod()
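    # Round-trip sanity check (illustrative, beyond the doctests above):
    assert decrypt(encrypt("HELLO WORLD", 4), 4) == "HELLO WORLD"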
| 268 |
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class UpperCamelCase_ :
def __init__( self : List[Any] , lowerCAmelCase_ : tuple[int, int] , lowerCAmelCase_ : tuple[int, int] ) -> Tuple:
UpperCAmelCase_ : List[str] = Node(start[1] , start[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = [self.start]
UpperCAmelCase_ : int = False
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Path | None:
while self.node_queue:
UpperCAmelCase_ : str = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
UpperCAmelCase_ : Optional[Any] = True
return self.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_successors(lowerCAmelCase_ )
for node in successors:
self.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : Node ) -> list[Node]:
UpperCAmelCase_ : List[str] = []
for action in delta:
UpperCAmelCase_ : List[Any] = parent.pos_x + action[1]
UpperCAmelCase_ : List[str] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowerCAmelCase_ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(lowerCAmelCase_ , lowerCAmelCase_ , self.target.pos_y , self.target.pos_x , lowerCAmelCase_ ) )
return successors
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node | None ) -> Path:
UpperCAmelCase_ : Union[str, Any] = node
UpperCAmelCase_ : Union[str, Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCAmelCase_ : Tuple = current_node.parent
path.reverse()
return path
class UpperCamelCase_ :
def __init__( self : str , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Tuple ) -> Union[str, Any]:
UpperCAmelCase_ : Optional[int] = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = BreadthFirstSearch(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Path | None:
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
UpperCAmelCase_ : int = self.fwd_bfs.node_queue.pop(0 )
UpperCAmelCase_ : Dict = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
UpperCAmelCase_ : str = True
return self.retrace_bidirectional_path(
lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase_ : str = current_bwd_node
UpperCAmelCase_ : List[str] = current_fwd_node
UpperCAmelCase_ : Tuple = {
self.fwd_bfs: self.fwd_bfs.get_successors(lowerCAmelCase_ ),
self.bwd_bfs: self.bwd_bfs.get_successors(lowerCAmelCase_ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(lowerCAmelCase_ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _SCREAMING_SNAKE_CASE ( self : List[str] , lowerCAmelCase_ : Node , lowerCAmelCase_ : Node ) -> Path:
UpperCAmelCase_ : Optional[Any] = self.fwd_bfs.retrace_path(lowerCAmelCase_ )
UpperCAmelCase_ : Dict = self.bwd_bfs.retrace_path(lowerCAmelCase_ )
bwd_path.pop()
bwd_path.reverse()
UpperCAmelCase_ : str = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
lowerCamelCase_ = (0, 0)
lowerCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BreadthFirstSearch(init, goal)
lowerCamelCase_ = bfs.search()
lowerCamelCase_ = time.time() - start_bfs_time
print('''Unidirectional BFS computation time : ''', bfs_time)
lowerCamelCase_ = time.time()
lowerCamelCase_ = BidirectionalBreadthFirstSearch(init, goal)
lowerCamelCase_ = bd_bfs.search()
lowerCamelCase_ = time.time() - start_bd_bfs_time
print('''Bidirectional BFS computation time : ''', bd_bfs_time)
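# Note on the queue choice above: `list.pop(0)` shifts every remaining element
# and costs O(n) per dequeue. A drop-in alternative (a hypothetical refactor,
# not part of the original) is `collections.deque`, which pops from the left
# in O(1):
#
#     from collections import deque
#     self.node_queue = deque([self.start])     # in __init__
#     current_node = self.node_queue.popleft()  # in search()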
| 268 | 1 |
"""
Project Euler Problem 205: https://projecteuler.net/problem=205

Peter rolls nine four-sided dice and Colin rolls six six-sided dice; find the
probability, rounded to seven decimal places, that Peter's total beats Colin's.
"""
from itertools import product


def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability


if __name__ == "__main__":
    print(f"{solution() = }")
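# Hedged sanity check: 0.5731441 is the published answer to Project Euler 205,
# so `solution()` is expected to return exactly that rounded value:
#
#     >>> solution()
#     0.5731441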
| 6 |
import argparse
import os
import re


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)


def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1
            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
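# Typical invocations (per the comment at the top, run from the repository root):
#
#   python utils/check_dummies.py                      # raise if dummy files are stale
#   python utils/check_dummies.py --fix_and_overwrite  # regenerate the dummy files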
| 6 | 1 |
""" Testing suite for the PyTorch TimeSformer model. """


import copy
import inspect
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
        TimesformerForVideoClassification,
        TimesformerModel,
    )
    from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from transformers import VideoMAEImageProcessor


class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))

                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)


# We will verify our results on a video of eating spaghetti
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        # logits were tested with a different mean and std, so we use the same here
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
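# Hedged usage note: in the transformers repository this suite is run with
# pytest, and the @slow-marked tests only execute when RUN_SLOW is set, e.g.
#
#   RUN_SLOW=1 pytest tests/models/timesformer/test_modeling_timesformer.py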
| 80 | import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
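# Usage sketch (hedged: the checkpoint id and input image are illustrative, and
# OCR mode additionally requires pytesseract to be installed):
#
#     from PIL import Image
#     from transformers import LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")  # OCR applied by default
#     print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values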
| 87 | 0 |
from manim import *


class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))

        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
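# Hedged usage note: Manim scenes are rendered from the CLI; assuming the scene
# class above is named Stage1 and lives in stage_1.py (the original class name
# was obfuscated, so Stage1 is an inferred choice):
#
#   manim -pql stage_1.py Stage1   # -p: preview when done, -ql: quick/low quality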
| 357 |
def z_function(input_str: str) -> list[int]:
    """For each index, compute the length of the longest substring starting
    there that is also a prefix of the whole string.

    >>> z_function("abracadabra")
    [0, 0, 0, 1, 0, 1, 0, 4, 0, 0, 1]
    """
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    """Check if we should keep extending the match at index ``i``."""
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    """Count occurrences of ``pattern`` in ``input_str`` via the Z-function.

    >>> find_pattern("abr", "abracadabra")
    2
    """
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if the value is greater than the length of the pattern string,
        # that index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
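# Cross-check against Python's built-in substring counting (illustrative; note
# that str.count only counts non-overlapping matches, while find_pattern counts
# overlapping ones — for this particular pattern the two happen to coincide):
def _cross_check_demo() -> None:
    assert find_pattern("abr", "abracadabra") == "abracadabra".count("abr") == 2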
| 288 | 0 |