| code (string, length 86-54.5k) | code_codestyle (int64, 0-371) | style_context (string, length 87-49.2k) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()

        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)
    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs

        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}

        return config, inputs_dict
    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_case_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True

        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11859, 0, 1611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11859, 0, 1611, 8, 5, 150, 26449, 2, 19, 348, 469, 3, 2595, 48, 20740, 246533, 246533, 19, 30, 5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
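# Editor's usage sketch (assumption: not part of the original test file): the
# greedy generation checked above can be reproduced directly from the hub
# checkpoint, e.g.
#   from transformers import CTRLTokenizer, CTRLLMHeadModel
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   model = CTRLLMHeadModel.from_pretrained("ctrl")
#   ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
#   print(tokenizer.decode(model.generate(ids, do_sample=False)[0]))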
| 1
|
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class FlaxDistilBertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            tie_weights_=True,
        )

        return config, input_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxDistilBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("distilbert-base-uncased")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]])
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 306
| 0
|
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
        fp.write(image_data)
    print(f"Done. Image saved to disk as {file_name}.")
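# Editor's sketch (assumption: not part of the original script): the og:image
# lookup above can be verified offline on inline HTML.
from bs4 import BeautifulSoup as _BS

_html = '<html><head><meta property="og:image" content="https://example.com/cat.jpg"></head></html>'
assert _BS(_html, "html.parser").find("meta", {"property": "og:image"})["content"] == "https://example.com/cat.jpg"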
| 366
|
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
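# Editor's cross-check (assumption: not part of the original file): the
# standard library enumerates the same r-combinations, which makes the
# recursion above easy to verify.
from itertools import combinations

assert len(list(combinations([10, 20, 30, 40, 50], 3))) == 10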
| 208
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = UnCLIPImageVariationPipeline
    params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"}
    batch_params = IMAGE_VARIATION_BATCH_PARAMS
    required_optional_params = [
        "generator",
        "return_dict",
        "decoder_num_inference_steps",
        "super_res_num_inference_steps",
    ]
    test_xformers_attention = False
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return 32
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim
@property
def UpperCAmelCase_ ( self ):
return self.time_input_dim * 4
@property
def UpperCAmelCase_ ( self ):
return 100
@property
def UpperCAmelCase_ ( self ):
__A : List[str] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Tuple = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(_A )
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
__A : Tuple = UnCLIPTextProjModel(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Optional[int] = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
# Out channels is double in channels because predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
__A : Optional[Any] = UNetaDConditionModel(**_A )
return model
@property
def UpperCAmelCase_ ( self ):
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def UpperCAmelCase_ ( self ):
torch.manual_seed(0 )
__A : Any = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def UpperCAmelCase_ ( self ):
# seeded differently to get different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
__A : Optional[Any] = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def UpperCAmelCase_ ( self ):
__A : Optional[Any] = self.dummy_decoder
__A : int = self.dummy_text_proj
__A : Dict = self.dummy_text_encoder
__A : Optional[int] = self.dummy_tokenizer
__A : Tuple = self.dummy_super_res_first
__A : Union[str, Any] = self.dummy_super_res_last
__A : Any = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
__A : Any = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
__A : Union[str, Any] = CLIPImageProcessor(crop_size=32 , size=32 )
__A : int = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
    def get_dummy_inputs(self, device, seed=0, pil_image=True):
        input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        if pil_image:
            input_image = input_image * 0.5 + 0.5
            input_image = input_image.clamp(0, 1)
            input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy()
            input_image = DiffusionPipeline.numpy_to_pil(input_image)[0]

        return {
            "image": input_image,
            "generator": generator,
            "decoder_num_inference_steps": 2,
            "super_res_num_inference_steps": 2,
            "output_type": "np",
        }
def UpperCAmelCase_ ( self ):
__A : Any = 'cpu'
__A : Tuple = self.get_dummy_components()
__A : Tuple = self.pipeline_class(**_A )
__A : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Optional[int] = self.get_dummy_inputs(_A , pil_image=_A )
__A : Dict = pipe(**_A )
__A : Tuple = output.images
__A : Union[str, Any] = self.get_dummy_inputs(_A , pil_image=_A )
__A : int = pipe(
**_A , return_dict=_A , )[0]
__A : Optional[Any] = image[0, -3:, -3:, -1]
__A : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A : Dict = np.array(
[
0.9_9_9_7,
0.0_0_0_2,
0.9_9_9_7,
0.9_9_9_7,
0.9_9_6_9,
0.0_0_2_3,
0.9_9_9_7,
0.9_9_6_9,
0.9_9_7_0,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'cpu'
__A : List[Any] = self.get_dummy_components()
__A : Optional[Any] = self.pipeline_class(**_A )
__A : List[str] = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : List[str] = self.get_dummy_inputs(_A , pil_image=_A )
__A : List[str] = pipe(**_A )
__A : Optional[Any] = output.images
__A : Optional[int] = self.get_dummy_inputs(_A , pil_image=_A )
__A : Dict = pipe(
**_A , return_dict=_A , )[0]
__A : Optional[Any] = image[0, -3:, -3:, -1]
__A : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__A : Tuple = np.array([0.9_9_9_7, 0.0_0_0_3, 0.9_9_9_7, 0.9_9_9_7, 0.9_9_7_0, 0.0_0_2_4, 0.9_9_9_7, 0.9_9_7_1, 0.9_9_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A : Optional[int] = 'cpu'
__A : str = self.get_dummy_components()
__A : List[str] = self.pipeline_class(**_A )
__A : Dict = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : List[str] = self.get_dummy_inputs(_A , pil_image=_A )
__A : Union[str, Any] = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
__A : Optional[int] = pipe(**_A )
__A : Tuple = output.images
__A : Dict = self.get_dummy_inputs(_A , pil_image=_A )
__A : Any = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
__A : Union[str, Any] = pipe(
**_A , return_dict=_A , )[0]
__A : List[Any] = image[0, -3:, -3:, -1]
__A : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
__A : Any = np.array(
[
0.9_9_9_7,
0.9_9_8_9,
0.0_0_0_8,
0.0_0_2_1,
0.9_9_6_0,
0.0_0_1_8,
0.0_0_1_4,
0.0_0_0_2,
0.9_9_3_3,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def UpperCAmelCase_ ( self ):
__A : Tuple = torch.device('cpu' )
class _A:
"""simple docstring"""
UpperCamelCase : List[Any] = 1
__A : str = self.get_dummy_components()
__A : Optional[int] = self.pipeline_class(**_A )
__A : Tuple = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
__A : Optional[int] = torch.Generator(device=_A ).manual_seed(0 )
__A : List[str] = pipe.decoder.dtype
__A : List[Any] = 1
__A : Optional[int] = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
__A : Any = pipe.prepare_latents(
_A , dtype=_A , device=_A , generator=_A , latents=_A , scheduler=DummyScheduler() )
__A : Tuple = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
__A : Tuple = pipe.prepare_latents(
_A , dtype=_A , device=_A , generator=_A , latents=_A , scheduler=DummyScheduler() )
__A : Optional[int] = self.get_dummy_inputs(_A , pil_image=_A )
__A : Dict = pipe(
**_A , decoder_latents=_A , super_res_latents=_A ).images
__A : int = self.get_dummy_inputs(_A , pil_image=_A )
# Don't pass image, instead pass embedding
__A : Any = pipeline_inputs.pop('image' )
__A : List[str] = pipe.image_encoder(_A ).image_embeds
__A : List[str] = pipe(
**_A , decoder_latents=_A , super_res_latents=_A , image_embeddings=_A , ).images
# make sure passing text embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1e-4
@skip_mps
def UpperCAmelCase_ ( self ):
__A : str = torch_device == 'cpu'
# Check is relaxed because there is not a torch 2.0 sliced attention added kv processor
__A : Tuple = 1e-2
self._test_attention_slicing_forward_pass(
test_max_difference=_A , expected_max_diff=_A )
@skip_mps
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = torch_device == 'cpu'
__A : List[Any] = True
__A : Dict = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=_A , relax_max_difference=_A , additional_params_copy_to_batched_inputs=_A , )
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
__A : Any = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=_A , additional_params_copy_to_batched_inputs=_A , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=_A )
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_local()
@skip_mps
def UpperCAmelCase_ ( self ):
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_unclip_image_variation_karlo(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/unclip/karlo_v1_alpha_cat_variation_fp16.npy"
        )

        pipeline = UnCLIPImageVariationPipeline.from_pretrained(
            "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipeline(
            input_image,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (256, 256, 3)
        assert_mean_pixel_difference(image, expected_image, 15)
| 280
|
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the type hints below still resolve without Pillow."""

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
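# Editor's sketch (assumption: not part of the original file): the helpers
# above reduce a large mask image to a short, comparable fingerprint, e.g.
#   from PIL import Image
#   print(mask_to_test_readable(Image.new("1", (4, 4))))  # {'hash': ..., 'shape': (4, 4)}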
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")
        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9_9_6_7},
{'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.9_9_3},
{'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9_9_0_9},
{'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9_8_7_9},
{'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9_8_3_4},
{'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9_7_1_6},
{'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 0.9_6_1_2},
{'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9_5_9_9},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9_5_5_2},
{'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9_5_3_2},
{'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9_5_1_6},
{'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9_4_9_9},
{'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9_4_8_3},
{'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9_4_6_4},
{'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.9_4_3},
{'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9_4_0_8},
{'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9_3_3_5},
{'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9_3_2_6},
{'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9_2_6_2},
{'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8_9_9_9},
{'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8_9_8_6},
{'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8_9_8_4},
{'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8_8_7_3},
{'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8_8_7_1}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)
        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0_4_4_4},
{'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.0_2_1_0},
{'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0_1_6_7},
{'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0_1_3_2},
{'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0_0_5_3},
] , )
| 280
| 1
|
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
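# Editor's sanity checks (assumption: not part of the original file):
assert is_prime(13)
assert not is_prime(15)
assert next_prime(14) == 17  # 14, 15, 16 are composite; 17 is the next prime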
| 99
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
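# Editor's note (assumption: not part of the original file): this module backs
# the `accelerate config` subcommand, e.g.
#   accelerate config --config_file ./default_config.yaml
# which walks through the prompts above and writes the answers to that file.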
| 99
| 1
|
from __future__ import annotations
class Node:
    def __init__(self, data) -> None:
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
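# Editor's sketch (assumption: not part of the original file): a node with
# exactly one child breaks the "full" property checked above.
lopsided = Node(1)
lopsided.left = Node(2)  # no matching right child
assert not is_full_binary_tree(lopsided)
assert depth_of_tree(lopsided) == 2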
| 99
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
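# Editor's usage sketch (assumption: not part of the original file; the image
# path is a placeholder):
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")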
| 44
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 19
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
| 19
| 1
|
"""simple docstring"""
def decimal_to_binary(num: int) -> str:
    """Convert an integer to its binary string representation, e.g. 5 -> '0b101'."""
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False
    if num < 0:
        negative = True
        num = -num

    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
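# Editor's sanity checks (assumption: not part of the original file):
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(5) == "0b101"
assert decimal_to_binary(-5) == "-0b101"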
| 40
|
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 326
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 24
| 1
|
def solution(length: int = 50) -> int:
    """Count the tilings of a row of the given length, using tiles of length 2 to 4
    laid at any offset over the base row."""
    ways_number = [1] * (length + 1)

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")
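# Editor's note (assumption: not part of the original file): ways_number[n]
# satisfies f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1, the count
# of tilings with unit squares and tiles of length 2-4; Project Euler 117
# states there are exactly fifteen tilings for a row of length five.
assert solution(5) == 15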
| 336
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
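# Editor's sanity check (assumption: not part of the original file):
# preprocess maps a PIL image to a (1, 3, H, W) tensor in [-1, 1], with H and
# W rounded down to multiples of 32, e.g.
#   from PIL import Image
#   assert preprocess(Image.new("RGB", (70, 70))).shape == (1, 3, 64, 64)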
class LDMSuperResolutionPipeline(DiffusionPipeline):
    """Pipeline for image super-resolution using latent diffusion."""

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 208
| 0
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDMaDPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDMaDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
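
# Hedged usage sketch (my addition, not part of the test file): running the
# LDM3D checkpoint exercised by the slow tests above. The checkpoint name comes
# from the tests; the invocation is a plausible example, not verified output.
#
#     import torch
#     from diffusers import StableDiffusionLDM3DPipeline
#
#     pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#     output = pipe("a photograph of an astronaut riding a horse")
#     rgb, depth = output.rgb, output.depth  # aligned RGB image and depth map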
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_UpperCamelCase : int = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class snake_case ( UpperCAmelCase , unittest.TestCase ):
__magic_name__ = BartphoTokenizer
__magic_name__ = False
__magic_name__ = True
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
super().setUp()
a : Any = ['▁This', '▁is', '▁a', '▁t', 'est']
a : List[Any] = dict(zip(A , range(len(A ) ) ) )
a : int = {'unk_token': '<unk>'}
a : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['monolingual_vocab_file'] )
with open(self.monolingual_vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(F'''{token} {vocab_tokens[token]}\n''' )
a : Optional[int] = BartphoTokenizer(A , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase__ ( self : Dict , **A : str ):
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **A )
def lowerCamelCase__ ( self : Optional[int] , A : Dict ):
'''simple docstring'''
a : Tuple = 'This is a là test'
a : List[Any] = 'This is a<unk><unk> test'
return input_text, output_text
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
a : Tuple = BartphoTokenizer(A , self.monolingual_vocab_file , **self.special_tokens_map )
a : int = 'This is a là test'
a : int = '▁This ▁is ▁a ▁l à ▁t est'.split()
a : str = tokenizer.tokenize(A )
self.assertListEqual(A , A )
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Dict = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
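
# Why the expected ids above are [4, 5, 6, 3, 3, 7, 8, 3] (my explanation, not
# part of the original test): the monolingual vocab file holds five pieces, and
# BartPho reserves the low ids for special tokens (<s>, <pad>, </s>, <unk> at
# 0-3 in the usual layout), so "▁This", "▁is", "▁a", "▁t", "est" land at 4-8,
# while out-of-vocabulary pieces like "▁l" and "à" fall back to the <unk> id 3.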
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)

_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements the common evaluation metrics, including MUC [Vilain et al., 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identify the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
CoNLL-file parsing was developed by Leo Born.
"""
lowercase : Optional[Any] = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, parse bit and coreference annotation).
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def A_ ( A__ ) -> Optional[int]:
a__ : Union[str, Any] = False
for line in key_lines:
if not line.startswith('#' ):
if len(line.split() ) > 6:
a__ : Tuple = line.split()[5]
if not parse_col == "-":
a__ : List[Any] = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        coref_metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=coref_metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
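
# Usage sketch beyond the docstring example (my addition): singletons can be
# excluded from scoring via the flag that `_compute` forwards to `evaluate`, e.g.
#
#     results = coval.compute(predictions=predictions, references=references,
#                             keep_singletons=False)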
from math import log2


def get_index_of_rightmost_set_bit(number: int) -> int:
    # number & -number isolates the lowest set bit; log2 of that power of two
    # is the bit's index (0 is returned for input 0 by convention).
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    elif not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    return 0 if (number == 0) else int(log2(number & -number))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
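
# Worked example (my addition): for 36 = 0b100100, number & -number isolates the
# lowest set bit (0b100 = 4) because -number is the two's complement, and
# log2(4) = 2 gives that bit's index.
assert 36 & -36 == 4
assert get_index_of_rightmost_set_bit(36) == 2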
"""simple docstring"""
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
__snake_case : List[str] = 'pt'
elif is_tf_available():
__snake_case : Any = 'tf'
else:
__snake_case : str = 'jax'
class A__ ( lowerCamelCase__ , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = PerceiverTokenizer
SCREAMING_SNAKE_CASE = False
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[Any]:
"""simple docstring"""
super().setUp()
__lowerCAmelCase : Tuple = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname)
@cached_property
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")
def _SCREAMING_SNAKE_CASE ( self: List[Any] , **_SCREAMING_SNAKE_CASE: Tuple) -> Optional[int]:
"""simple docstring"""
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # Collect ids whose single-byte decodings are plain ASCII letters/spaces
        # and that round-trip through encode(), so the common tests get a clean
        # sequence even though not every single byte is valid UTF-8.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_multibytes_char(self):
        tokenizer = self.perceiver_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]Unicode €.[SEP]")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]")

    def test_prepare_batch_integration(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 38), batch.input_ids.shape)
        self.assertEqual((2, 38), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.perceiver_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.perceiver_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)

    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )

    def test_decode_single_bytes(self):
        tokenizer = self.perceiver_tokenizer
        self.assertEqual(tokenizer.decode([178]), "�")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a fixed vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist, so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
"""simple docstring"""
from ....utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: Optional[Any]=None , _SCREAMING_SNAKE_CASE: int=2048) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Any = config.__dict__
__lowerCAmelCase : Dict = modal_hidden_size
if num_labels:
__lowerCAmelCase : Optional[int] = num_labels
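
# Hedged usage sketch (my addition): MMBTConfig copies every attribute of an
# existing text config and layers the modal embedding size on top, e.g.
#
#     from transformers import BertConfig
#
#     config = MMBTConfig(BertConfig(), num_labels=2, modal_hidden_size=2048)
#     assert config.hidden_size == BertConfig().hidden_size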
import importlib.util
import os
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import (
    is_accelerate_available,
    is_flax_available,
    is_safetensors_available,
    is_tf_available,
    is_torch_available,
)
from . import BaseTransformersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args) -> None:
        self._accelerate_config_file = accelerate_config_file

    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
def binomial_coefficient(n, r):
    # Compute C(n, r) using a single Pascal's-triangle row, updated in place.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
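
# Sanity checks (my addition): the single-row Pascal update above computes
# C(n, r) in O(n * r) time and O(r) space, since row i only needs row i - 1.
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=5, r=0) == 1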
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
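
# Why the fairseq offset exists (my explanation, not original code): fairseq
# reserved ids 0-3 for "<s>NOTUSED", "<pad>", "</s>NOTUSED", "<unk>", so every
# sentencepiece id is shifted by len(self.fairseq_tokens_to_ids) == 4 before it
# becomes a model input id, and unknown pieces (sp id 0) map straight to <unk>.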
from sklearn.metrics import f1_score, matthews_corrcoef

import datasets

from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """
    Computes MultiRC scores: per-question macro-F1 plus exact-match and
    pairwise F1 over all answers.
    """
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)

    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class SuperGlue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "boolq",
            "cb",
            "copa",
            "multirc",
            "record",
            "rte",
            "wic",
            "wsc",
            "wsc.fixed",
            "axb",
            "axg",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            codebase_urls=[],
            reference_urls=[],
            format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None,
        )

    def _get_feature_types(self):
        if self.config_name == "record":
            return {
                "predictions": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "prediction_text": datasets.Value("string"),
                },
                "references": {
                    "idx": {
                        "passage": datasets.Value("int64"),
                        "query": datasets.Value("int64"),
                    },
                    "answers": datasets.Sequence(datasets.Value("string")),
                },
            }
        elif self.config_name == "multirc":
            return {
                "predictions": {
                    "idx": {
                        "answer": datasets.Value("int64"),
                        "paragraph": datasets.Value("int64"),
                        "question": datasets.Value("int64"),
                    },
                    "prediction": datasets.Value("int64"),
                },
                "references": datasets.Value("int64"),
            }
        else:
            return {
                "predictions": datasets.Value("int64"),
                "references": datasets.Value("int64"),
            }

    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
def lowerCamelCase__ ( snake_case_ : int = 100 ) -> int:
__snake_case = n * (n + 1) * (2 * n + 1) / 6
__snake_case = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
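
# Worked example (my addition): for n = 10 the sum of squares is 385 and the
# square of the sum is 55**2 = 3025, so the difference is 3025 - 385 = 2640.
assert solution(10) == 2640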
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
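
# Hedged usage sketch (my addition): instantiating the config with defaults and
# overriding one field, the usual PretrainedConfig pattern.
#
#     config = ViTMSNConfig(image_size=384)
#     assert config.patch_size == 16 and config.image_size == 384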
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool


class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
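
# Hedged usage sketch (my addition, not part of the module): PipelineTool
# instances are callable, and the agents API loads this tool under its name,
# "transcriber". The exact import path and the `audio` value are assumptions.
#
#     from transformers.tools import SpeechToTextTool
#
#     transcriber = SpeechToTextTool()
#     text = transcriber(audio)  # `audio` = a waveform array or similar input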
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
T = TypeVar("T")
U = TypeVar("U")

class DoubleLinkedListNode(Generic[T, U]):
    def __init__(self, key: T | None, val: U | None):
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self):
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )

class DoubleLinkedList(Generic[T, U]):
    def __init__(self):
        # Sentinel head and rear nodes remove the usual linked-list edge cases.
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self):
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Insert node right before self.rear, i.e. at the most-recently-used end."""
        previous = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        previous.next = node
        node.prev = previous
        node.next = self.rear
        self.rear.prev = node

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Detach node from the list and return it, or None if it is not linked in."""
        if node.prev is None or node.next is None:
            return None
        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node

class LRUCache(Generic[T, U]):
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int):
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self):
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        # Note: a pythonic interface would throw KeyError rather than return None
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to the type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list
                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update its value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128):
        """Decorator version of the cache, keyed on the first positional argument."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)
                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner

if __name__ == "__main__":
    import doctest

    doctest.testmod()
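
    # Illustrative use of the decorator interface defined above (the Fibonacci
    # example is ours, not part of the module): memoize a recursive function.
    @LRUCache.decorator(100)
    def fib(num: int) -> int:
        return num if num < 2 else fib(num - 1) + fib(num - 2)

    print(fib(30))  # 832040
    print(fib.cache_info())  # hit/miss counts depend on evaluation order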
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def main() -> None:
    print("Making key files...")
    make_key_files("rsa", 1024)
    print("Key files generation successful.")

def generate_key(key_size):
    print("Generating prime p...")
    p = rabinMiller.generate_large_prime(key_size)
    print("Generating prime q...")
    q = rabinMiller.generate_large_prime(key_size)
    n = p * q

    print("Generating e that is relatively prime to (p - 1) * (q - 1)...")
    while True:
        e = random.randrange(2 ** (key_size - 1), 2 ** (key_size))
        if cryptoMath.gcd(e, (p - 1) * (q - 1)) == 1:
            break

    print("Calculating d that is mod inverse of e...")
    d = cryptoMath.find_mod_inverse(e, (p - 1) * (q - 1))

    public_key = (n, e)
    private_key = (n, d)
    return (public_key, private_key)

def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{public_key[0]},{public_key[1]}")
    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as out_file:
        out_file.write(f"{key_size},{private_key[0]},{private_key[1]}")

if __name__ == "__main__":
    main()
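
# Hedged sketch of how the generated textbook-RSA keys are used (illustrative,
# not part of this module): encrypt with pow(m, e, n), decrypt with pow(c, d, n).
#
#     (n, e), (n, d) = generate_key(1024)
#     message = 42
#     ciphertext = pow(message, e, n)
#     assert pow(ciphertext, d, n) == message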
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )

class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
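
# Hedged usage sketch (illustrative values): the config is a plain keyword
# constructor, and the legacy "gated-gelu" alias is remapped in __init__ above.
#
#     config = T5Config(d_model=256, num_layers=4, feed_forward_proj="gated-gelu")
#     assert config.dense_act_fn == "gelu_new" and config.is_gated_act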
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping

def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)

class DatasetMetadata(dict):
    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path):
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
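
# Hedged sketch of the round trip this module implements (illustrative input):
#
#     readme = "---\nlicense: mit\n---\n# My dataset\n"
#     yaml_block, body = _split_yaml_from_readme(readme)
#     meta = DatasetMetadata.from_yaml_string(yaml_block)
#     assert meta["license"] == "mit" and body.startswith("# My dataset")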
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
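
# Hedged demo (illustrative): "olssv, dvysk!" is "hello, world!" shifted by 7,
# and the chi-squared scan is expected to recover that shift.
#
#     shift, chi_squared, decoded = decrypt_caesar_with_chi_squared("olssv, dvysk!")
#     print(shift, decoded)  # 7 hello, world!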
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output

def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )

def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")

if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
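
# Quick check (illustrative): atbash is an involution, i.e. its own inverse.
#
#     assert atbash("ABC") == "ZYX"
#     assert atbash(atbash("testStringtest")) == "testStringtest"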
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Fallback stub so the annotations below still resolve without vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass

def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]

def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass
    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
{'''mask''': {'''hash''': '''e2d0b7a0b7''', '''shape''': (480, 640)}, '''scores''': 0.9_9_6_7},
{'''mask''': {'''hash''': '''453c7844bd''', '''shape''': (480, 640)}, '''scores''': 0.9_9_3},
{'''mask''': {'''hash''': '''3d44f2926d''', '''shape''': (480, 640)}, '''scores''': 0.9_9_0_9},
{'''mask''': {'''hash''': '''64033ddc3f''', '''shape''': (480, 640)}, '''scores''': 0.9_8_7_9},
{'''mask''': {'''hash''': '''801064ff79''', '''shape''': (480, 640)}, '''scores''': 0.9_8_3_4},
{'''mask''': {'''hash''': '''6172f276ef''', '''shape''': (480, 640)}, '''scores''': 0.9_7_1_6},
{'''mask''': {'''hash''': '''b49e60e084''', '''shape''': (480, 640)}, '''scores''': 0.9_6_1_2},
{'''mask''': {'''hash''': '''a811e775fd''', '''shape''': (480, 640)}, '''scores''': 0.9_5_9_9},
{'''mask''': {'''hash''': '''a6a8ebcf4b''', '''shape''': (480, 640)}, '''scores''': 0.9_5_5_2},
{'''mask''': {'''hash''': '''9d8257e080''', '''shape''': (480, 640)}, '''scores''': 0.9_5_3_2},
{'''mask''': {'''hash''': '''32de6454a8''', '''shape''': (480, 640)}, '''scores''': 0.9_5_1_6},
{'''mask''': {'''hash''': '''af3d4af2c8''', '''shape''': (480, 640)}, '''scores''': 0.9_4_9_9},
{'''mask''': {'''hash''': '''3c6db475fb''', '''shape''': (480, 640)}, '''scores''': 0.9_4_8_3},
{'''mask''': {'''hash''': '''c290813fb9''', '''shape''': (480, 640)}, '''scores''': 0.9_4_6_4},
{'''mask''': {'''hash''': '''b6f0b8f606''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''92ce16bfdf''', '''shape''': (480, 640)}, '''scores''': 0.9_4_3},
{'''mask''': {'''hash''': '''c749b25868''', '''shape''': (480, 640)}, '''scores''': 0.9_4_0_8},
{'''mask''': {'''hash''': '''efb6cab859''', '''shape''': (480, 640)}, '''scores''': 0.9_3_3_5},
{'''mask''': {'''hash''': '''1ff2eafb30''', '''shape''': (480, 640)}, '''scores''': 0.9_3_2_6},
{'''mask''': {'''hash''': '''788b798e24''', '''shape''': (480, 640)}, '''scores''': 0.9_2_6_2},
{'''mask''': {'''hash''': '''abea804f0e''', '''shape''': (480, 640)}, '''scores''': 0.8_9_9_9},
{'''mask''': {'''hash''': '''7b9e8ddb73''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_6},
{'''mask''': {'''hash''': '''cd24047c8a''', '''shape''': (480, 640)}, '''scores''': 0.8_9_8_4},
{'''mask''': {'''hash''': '''6943e6bcbd''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_3},
{'''mask''': {'''hash''': '''b5f47c9191''', '''shape''': (480, 640)}, '''scores''': 0.8_8_7_1}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'''mask''': {'''hash''': '''115ad19f5f''', '''shape''': (480, 640)}, '''scores''': 1.0_4_4_4},
{'''mask''': {'''hash''': '''6affa964c6''', '''shape''': (480, 640)}, '''scores''': 1.0_2_1_0},
{'''mask''': {'''hash''': '''dfe28a0388''', '''shape''': (480, 640)}, '''scores''': 1.0_1_6_7},
{'''mask''': {'''hash''': '''c0a5f4a318''', '''shape''': (480, 640)}, '''scores''': 1.0_1_3_2},
{'''mask''': {'''hash''': '''fe8065c197''', '''shape''': (480, 640)}, '''scores''': 1.0_0_5_3},
] , )
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
SCREAMING_SNAKE_CASE :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """Compute (a ** n) % mod in O(log n) multiplications by halving the exponent."""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod

# a prime number
p = 701

a = 1_000_000_000
b = 10

# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
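
# The identity above is Fermat's little theorem: for prime p and b coprime to
# p, binary_exponentiation(b, p - 2, p) is the modular inverse of b, so
# division by b modulo p becomes a multiplication. Minimal check:
assert (b * binary_exponentiation(b, p - 2, p)) % p == 1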
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1

def solution(n: int = 2_000_000) -> int:
    """Return the sum of all the primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))

if __name__ == "__main__":
    print(f"{solution() = }")
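
# Sanity check from the problem statement: the primes below 10 are 2, 3, 5
# and 7, which sum to 17, so solution(10) == 17.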
"""simple docstring"""
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")
    return shapes

@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))

@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges both indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices = []
    path_list = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(start):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices

@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])

def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out

class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn: Callable, args: tuple, min_chunk_size: int) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
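
# Hedged usage sketch of chunk_layer (toy layer; illustrative, not from this
# module): apply a callable over the flattened batch dims in chunks of 3.
#
#     x = torch.randn(8, 16, 4)
#     out = chunk_layer(lambda x: {"y": x * 2}, {"x": x}, chunk_size=3, no_batch_dims=2)
#     assert torch.allclose(out["y"], x * 2)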
"""simple docstring"""
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance

@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
"""simple docstring"""
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"

def clean_doc_toc(doc_list):
    """Remove duplicate entries from a toctree section and sort it alphabetically by title."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in counts or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc

def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]["sections"]
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )

def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
"""simple docstring"""
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the digits of factorial(num) (Project Euler problem 20)."""
    return sum(int(x) for x in str(factorial(num)))

if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
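
# Worked example (illustrative): factorial(10) = 3628800, whose digits sum to
# 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27, so solution(10) == 27.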
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of number, optionally rounded to digit_amount places."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]

def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit sample variance (z-score)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
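
# Quick illustrative check: min-max maps the extremes to 0 and 1, and the
# z-scores of the symmetric sample [2, 4, 6] (sample stdev 2) are symmetric.
assert normalization([2, 4, 6]) == [0.0, 0.5, 1.0]
assert standardization([2, 4, 6]) == [-1.0, 0.0, 1.0]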
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the arc length of fnc on [x_start, x_end] with straight segments."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length

if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            """PyTorch TPU distributed training launch """
            """helper utility that will spawn up """
            """multiple distributed processes"""
        ))
    # Optional arguments for the launch helper
    parser.add_argument("""--num_cores""", type=int, default=1, help="""Number of TPU cores to use (1 or 8).""")
    # positional
    parser.add_argument(
        """training_script""", type=str, help=(
            """The full path to the single TPU training """
            """program/script to be launched in parallel, """
            """followed by all the arguments for the """
            """training script"""
        ),)
    # rest from the training program
    parser.add_argument("""training_script_args""", nargs=REMAINDER)
    return parser.parse_args()
def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
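# Example invocation (hypothetical script name and arguments, for illustration):
# python xla_spawn.py --num_cores 8 ./run_glue.py --model_name_or_path bert-base-cased ...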
| 81
| 1
|
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase_ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS)
UpperCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase_ = re.compile(R'\[(.+?)\]\((https://huggingface\.co/.+?)\)')
UpperCAmelCase_ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('''/'''):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '''\n'''.join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
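# Illustrative behavior of the checkpoint regex (not part of the original script):
# >>> _re_checkpoint.findall("[bert-base-uncased](https://huggingface.co/bert-base-uncased)")
# [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]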
| 201
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_ctrl""": ["""CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """CTRLConfig"""],
"""tokenization_ctrl""": ["""CTRLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ctrl"] = [
"""CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CTRLForSequenceClassification""",
"""CTRLLMHeadModel""",
"""CTRLModel""",
"""CTRLPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_ctrl"] = [
"""TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCTRLForSequenceClassification""",
"""TFCTRLLMHeadModel""",
"""TFCTRLModel""",
"""TFCTRLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 198
| 0
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
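# Illustrative behavior (assumption: the `note_seq` backend is not installed):
# any attempt to instantiate the placeholder raises an ImportError via
# `requires_backends`, pointing the user at the missing dependency.
# >>> MidiProcessor()  # raises ImportError asking to install note-seq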
| 283
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A =[
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["""GITHUB_TOKEN"""])
    repo = g.get_repo("""huggingface/diffusers""")
    open_issues = repo.get_issues(state="""open""")
    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="""closed""" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="""open""" )
issue.remove_from_labels("""stale""" )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
issue.add_to_labels("""stale""" )
if __name__ == "__main__":
main()
| 283
| 1
|
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    '''simple docstring'''

    def __init__(self, id_):
        """simple docstring"""
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        """simple docstring"""
        return self.key < other.key

    def __repr__(self):
        """simple docstring"""
        return self.id

    def add_neighbor(self, vertex):
        """simple docstring"""
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        """simple docstring"""
        self.edges[vertex.id] = weight
def connect(graph, a, b, edge):
    '''simple docstring'''
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def prim(graph, root):
    '''simple docstring'''
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a
def prim_heap(graph, root):
    '''simple docstring'''
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
def test_vector() -> None:
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=('DownBlock2D', 'AttnDownBlock2D'), up_block_types=('AttnUpBlock2D', 'UpBlock2D'),)
        return model

    def test_inference(self):
        '''simple docstring'''
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type='numpy').images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type='numpy', return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference_cifar10(self):
        '''simple docstring'''
        model_id = 'google/ddpm-cifar10-32'
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type='numpy').images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 271
| 0
|
from functools import reduce
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12))
if __name__ == "__main__":
print(f'''{solution() = }''')
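# Illustrative check (not in the original file): the same reduce-based digit
# product applied to a short window of digits.
# >>> from functools import reduce
# >>> int(reduce(lambda x, y: str(int(x) * int(y)), "7316"))
# 126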
| 355
|
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class T5FilmDecoder(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1,):
        """simple docstring"""
        super().__init__()
        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False), nn.SiLU(), nn.Linear(d_model * 4, d_model * 4, bias=False), nn.SiLU(),)
        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)
        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)
        self.decoder_norm = T5LayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)

    def encoder_decoder_mask(self, query_input, key_input):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """simple docstring"""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)
        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time, embedding_dim=self.config.d_model, max_period=self.config.max_decoder_noise_time,).to(dtype=self.dtype)
        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)
        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
        seq_length = decoder_input_tokens.shape[1]
        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),)
        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)
        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype)
        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]
        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)
        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask,)[0]
        y = self.decoder_norm(y)
        y = self.post_dropout(y)
        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1E-6):
        """simple docstring"""
        super().__init__()
        self.layer = nn.ModuleList()
        # cond self attention: layer 0
        self.layer.append(
            T5LayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate))
        # cross attention: layer 1
        self.layer.append(
            T5LayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,))
        # Film Cond MLP + dropout: last layer
        self.layer.append(
            T5LayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon))

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None,):
        """simple docstring"""
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask,)
        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1E10).to(
                encoder_hidden_states.dtype)
            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask,)
        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)
        return (hidden_states,)
class T5LayerSelfAttentionCond(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.layer_norm = T5LayerNorm(d_model)
        self.FiLMLayer = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None,):
        """simple docstring"""
        normed_hidden_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)
        # Self-attention block
        attention_output = self.attention(normed_hidden_states)
        hidden_states = hidden_states + self.dropout(attention_output)
        return hidden_states
class T5LayerCrossAttention(nn.Module):
    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None,):
        """simple docstring"""
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1),)
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class T5LayerFFCond(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        """simple docstring"""
        super().__init__()
        self.DenseReluDense = T5DenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = T5FiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = T5LayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        """simple docstring"""
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)
        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class T5DenseGatedActDense(nn.Module):
    def __init__(self, d_model, d_ff, dropout_rate):
        """simple docstring"""
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        """simple docstring"""
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class T5LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1E-6):
        """simple docstring"""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        """simple docstring"""
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)
        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    def forward(self, input: torch.Tensor) -> torch.Tensor:
        """simple docstring"""
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.044715 * torch.pow(input, 3.0))))
class T5FiLMLayer(nn.Module):
    def __init__(self, in_features, out_features):
        """simple docstring"""
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        """simple docstring"""
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
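# Shape sketch (illustrative, not from the original file; d_model=768 is an
# assumed example value): the FiLM layer turns a conditioning embedding of
# width 4*d_model into a per-channel scale and shift applied to `x`.
# film = T5FiLMLayer(in_features=768 * 4, out_features=768)
# x = torch.randn(2, 256, 768)        # (batch, seq, d_model)
# cond = torch.randn(2, 1, 768 * 4)   # (batch, 1, 4 * d_model)
# y = film(x, cond)                   # same shape as x: (2, 256, 768)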
| 269
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs,):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
                ('token_type_ids', dynamic_axis),
            ])
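# Illustrative usage (not part of the original module):
# config = BertConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
# config.model_type  # "bert"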
| 221
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(self, prediction_length=None, context_length=None, distribution_output="student_t", loss="nll", input_size=1, lags_sequence=[1, 2, 3, 4, 5, 6, 7], scaling="mean", num_dynamic_real_features=0, num_static_categorical_features=0, num_static_real_features=0, num_time_features=0, cardinality=None, embedding_dimension=None, encoder_ffn_dim=32, decoder_ffn_dim=32, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, is_encoder_decoder=True, activation_function="gelu", d_model=64, dropout=0.1, encoder_layerdrop=0.1, decoder_layerdrop=0.1, attention_dropout=0.1, activation_dropout=0.1, num_parallel_samples=100, init_std=0.02, use_cache=True, **kwargs,):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`')
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`')
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
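# Illustrative usage (not part of the original module):
# config = TimeSeriesTransformerConfig(prediction_length=24)
# config.context_length   # defaults to prediction_length, i.e. 24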
| 221
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True,):
        size = size if size is not None else {"""height""": 18, """width""": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, """clusters"""))
        self.assertTrue(hasattr(image_processing, """do_resize"""))
        self.assertTrue(hasattr(image_processing, """size"""))
        self.assertTrue(hasattr(image_processing, """do_normalize"""))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"""height""": 18, """width""": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"""height""": 42, """width""": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, """image_processor.json""")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], image_processor_second[key])

    @unittest.skip("""ImageGPT requires clusters at initialization""")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("""hf-internal-testing/fixtures_image_utils""", split="""test""")
    image1 = Image.open(dataset[4]["""file"""])
    image2 = Image.open(dataset[5]["""file"""])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("""openai/imagegpt-small""")
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors="""pt""")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
| 358
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
VISUAL_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs,):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
| 126
| 0
|
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    """simple docstring"""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'{solution() = }')
| 81
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """moussaKam/mbarthez""": 1024,
    """moussaKam/barthez""": 1024,
    """moussaKam/barthez-orangesum-title""": 1024,
}
lowerCamelCase_ : Tuple = """▁"""
class BarthezTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs,) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''''''
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 81
| 1
|
_snake_case : Any = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case : int = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case : int = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 359
|
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    if num < 0:
        raise ValueError('Number should not be negative.')
    return 1 if num in (0, 1) else num * factorial(num - 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
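# Illustrative behavior (not in the original file): thanks to `lru_cache`,
# repeated calls reuse previously computed results.
# >>> factorial(5)
# 120
# >>> factorial.cache_info()  # cache statistics are available on the wrapper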
| 207
| 0
|
def gray_code(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
    '''simple docstring'''
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1<< n is equivalent to 2^n
    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)
    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
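# Illustrative output (not in the original file):
# >>> gray_code(2)
# [0, 1, 3, 2]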
| 283
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    '''simple docstring'''

    model_type = "mobilenet_v2"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, depth_divisible_by=8, min_depth=8, expand_ratio=6, output_stride=32, first_layer_is_expansion=True, finegrained_output=True, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.8, initializer_range=0.02, layer_norm_eps=0.001, semantic_loss_ignore_index=255, **kwargs,):
        """simple docstring"""
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    '''simple docstring'''

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        """simple docstring"""
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self):
        """simple docstring"""
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self):
        """simple docstring"""
        return 1e-4
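# Illustrative usage (not part of the original module; `task` is a standard
# OnnxConfig constructor argument):
# config = MobileNetV2Config(depth_multiplier=0.75)
# onnx_config = MobileNetV2OnnxConfig(config, task="image-classification")
# list(onnx_config.outputs)   # ["logits"]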
| 283
| 1
|
'''simple docstring'''
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def create_components():
    """simple docstring"""
    model = torch.nn.Linear(2, 4)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(optimizer, max_lr=0.01, steps_per_epoch=2, epochs=1)
    train_dl = DataLoader(TensorDataset(torch.tensor([1, 2, 3])))
    valid_dl = DataLoader(TensorDataset(torch.tensor([4, 5, 6])))
    return model, optimizer, scheduler, train_dl, valid_dl


def get_signature(model):
    """simple docstring"""
    return (model.weight.abs().sum() + model.bias.abs().sum()).item()


def load_random_weights(model):
    """simple docstring"""
    state = torch.nn.Linear(*tuple(model.weight.T.shape)).state_dict()
    model.load_state_dict(state)
class __UpperCAmelCase ( UpperCAmelCase_ ):
'''simple docstring'''
@require_cuda
def A (self : Dict ):
A = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(__lowercase ):
A = Accelerator(cpu=__lowercase )
def A (self : Tuple ):
A = Accelerator()
A = GradientState()
assert state.num_steps == 1
A = 4
assert state.num_steps == 4
assert state.sync_gradients is True
A = False
assert state.sync_gradients is False
GradientState._reset_state()
def A (self : str ):
A = Accelerator()
A = create_components()
(
A
) = accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def A (self : int ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def A (self : Any ):
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*_lowerCAmelCase : List[Any] , **_lowerCAmelCase : int ):
pass
with patch("""torch.cuda.set_device""" , __lowercase ), patch_environment(ACCELERATE_TORCH_DEVICE="""cuda:64""" ):
A = Accelerator()
self.assertEqual(str(accelerator.state.device ) , """cuda:64""" )
def A (self : Union[str, Any] ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
A = get_signature(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match
load_random_weights(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) > 1e-3 )
# make sure loaded weights match
accelerator.load_state(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) < 1e-3 )
def A (self : List[Any] ):
A = Accelerator()
A = create_components()
accelerator.prepare(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
A = get_signature(__lowercase )
# saving hook
def save_config(_lowerCAmelCase : int , _lowerCAmelCase : Any , _lowerCAmelCase : Dict ):
A = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(__lowercase , """data.json""" ) , """w""" ) as f:
json.dump(__lowercase , __lowercase )
# loading hook
def load_config(_lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ):
with open(os.path.join(__lowercase , """data.json""" ) , """r""" ) as f:
A = json.load(__lowercase )
A = config['''class_name''']
A = accelerator.register_save_state_pre_hook(__lowercase )
A = accelerator.register_load_state_pre_hook(__lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match with hooks
load_random_weights(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) > 1e-3 )
# random class name to verify correct one is loaded
A = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(__lowercase )
self.assertTrue(abs(model_signature - get_signature(__lowercase ) ) < 1e-3 )
# mode.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(__lowercase )
# make sure random weights don't match with hooks removed
load_random_weights(__lowercase )
        self.assertTrue(abs(model_signature - get_signature(model)) > 1e-3)
        # random class name to verify correct one is loaded
        model.class_name = "random"
        # make sure loaded weights match with hooks removed
        accelerator.load_state(tmpdirname)
        self.assertTrue(abs(model_signature - get_signature(model)) < 1e-3)
        # model.class_name is NOT loaded from config
        self.assertTrue(model.class_name != model.__class__.__name__)

    def test_accelerator_none(self):
        """Just test that passing None to accelerator.prepare() works."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = None

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertTrue(dummy_obj is None)

    def test_is_accelerator_prepared(self):
        """Checks that `_is_accelerate_prepared` is set properly on prepared objects."""
        accelerator = Accelerator()
        model, optimizer, scheduler, train_dl, valid_dl = create_components()
        dummy_obj = [1, 2, 3]

        # This should work
        model, optimizer, scheduler, train_dl, valid_dl, dummy_obj = accelerator.prepare(
            model, optimizer, scheduler, train_dl, valid_dl, dummy_obj
        )
        self.assertEqual(
            getattr(dummy_obj, "_is_accelerate_prepared", False),
            False,
            "Dummy object should NOT have `_is_accelerate_prepared` set",
        )
        self.assertEqual(
            getattr(model, "_is_accelerate_prepared", False),
            True,
            "Model is missing `_is_accelerate_prepared` or it is set to `False`",
        )
        self.assertEqual(
            getattr(optimizer, "_is_accelerate_prepared", False),
            True,
            "Optimizer is missing `_is_accelerate_prepared` or it is set to `False`",
        )
        self.assertEqual(
            getattr(scheduler, "_is_accelerate_prepared", False),
            True,
            "Scheduler is missing `_is_accelerate_prepared` or it is set to `False`",
        )
        self.assertEqual(
            getattr(train_dl, "_is_accelerate_prepared", False),
            True,
            "Train Dataloader is missing `_is_accelerate_prepared` or it is set to `False`",
        )
        self.assertEqual(
            getattr(valid_dl, "_is_accelerate_prepared", False),
            True,
            "Valid Dataloader is missing `_is_accelerate_prepared` or it is set to `False`",
        )

    @slow
    @require_bnb
    def test_accelerator_bnb(self):
        """Tests that the accelerator can be used with an 8-bit (bitsandbytes) model."""
        from transformers import AutoModelForCausalLM

        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map={"": 0},
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @slow
    @require_bnb
    def test_accelerator_bnb_cpu_error(self):
        """Tests that preparing an 8-bit model split between CPU and GPU raises an error."""
        from transformers import AutoModelForCausalLM

        accelerator = Accelerator()
        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = "cpu"
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m", device_map=device_map, load_in_8bit=True, llm_int8_enable_fp32_cpu_offload=True
        )

        # This should not work and raise a ValueError
        with self.assertRaises(ValueError):
            model = accelerator.prepare(model)

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu(self):
        """Tests that preparing an 8-bit model sharded across GPUs raises an error under MULTI_GPU."""
        from transformers import AutoModelForCausalLM

        PartialState._shared_state = {"distributed_type": DistributedType.MULTI_GPU}

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        model.tie_weights()
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should not work and raise a ValueError
        with self.assertRaises(ValueError):
            _ = accelerator.prepare(model)

        PartialState._reset_state()

    @slow
    @require_bnb
    @require_multi_gpu
    def test_accelerator_bnb_multi_gpu_no_distributed(self):
        """Tests that an 8-bit model sharded across GPUs can be prepared when no distributed setup is active."""
        from transformers import AutoModelForCausalLM

        with init_empty_weights():
            model = AutoModelForCausalLM.from_pretrained(
                "EleutherAI/gpt-neo-125m",
            )
        device_map = infer_auto_device_map(model)
        device_map["lm_head"] = 1
        model = AutoModelForCausalLM.from_pretrained(
            "EleutherAI/gpt-neo-125m",
            load_in_8bit=True,
            device_map=device_map,
        )
        accelerator = Accelerator()

        # This should work
        model = accelerator.prepare(model)

    @require_cuda
    def test_accelerator_cpu_flag_prepare(self):
        model = torch.nn.Linear(10, 10)
        sgd = torch.optim.SGD(model.parameters(), lr=0.01)
        accelerator = Accelerator(cpu=True)
        _ = accelerator.prepare(sgd)
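# A minimal sketch of the pattern every test above exercises: build torch
# objects, hand them to `Accelerator.prepare`, and get device-placed wrappers
# back. (Illustrative only; the model/optimizer here are placeholders, not
# fixtures from this suite.)
if __name__ == "__main__":
    demo_model = torch.nn.Linear(10, 10)
    demo_optimizer = torch.optim.SGD(demo_model.parameters(), lr=0.01)
    demo_accelerator = Accelerator()
    demo_model, demo_optimizer = demo_accelerator.prepare(demo_model, demo_optimizer)
    print(f"prepared on device: {demo_accelerator.device}")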
| 367
|
import math
class Graph:
    def __init__(self, n=0):  # a graph with nodes 0, 1, ..., n-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j
        for i in range(0, n):
            self.dp[i][i] = 0  # the distance from a node to itself is 0

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        # relax every (i, j) pair through every possible intermediate node k
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 10)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 10)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    print(graph.show_min(1, 4))  # 11 (path 1 -> 3 -> 4)
    print(graph.show_min(0, 3))  # 16 (path 0 -> 2 -> 3)
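    # Floyd-Warshall relaxes every (i, j) pair through every intermediate node k,
    # so it runs in O(n^3) time with O(n^2) memory; it handles negative edge
    # weights as long as the graph contains no negative cycle.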
| 337
| 0
|
"""simple docstring"""
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.global_variables_initializer()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
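# Example usage: a minimal sketch, assuming the TensorFlow 1.x API is available
# (e.g. via `import tensorflow.compat.v1 as tf` plus `tf.disable_v2_behavior()`),
# since `tf.Session` and `tf.placeholder` no longer exist in TF 2.x eager mode.
if __name__ == "__main__":
    from numpy import random

    points = [random.rand(2) for _ in range(50)]  # 50 random 2-D points
    centers, cluster_labels = tf_k_means_cluster(points, 3)  # k = 3 clusters
    print(centers)
    print(cluster_labels)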
| 261
|
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))


@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]


def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 269
| 0
|
def solution(limit: int = 50_000_000) -> int:
    """
    Project Euler 87: count the numbers below `limit` expressible as the sum of
    a prime square, a prime cube, and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    # Sieve of Eratosthenes over the odd numbers up to sqrt(limit - 24)
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    # iterate primes in increasing order so the early `break`s below are valid
    sorted_primes = sorted(primes)
    for prime1 in sorted_primes:
        square = prime1 * prime1
        for prime2 in sorted_primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:  # 16 is the smallest fourth power, 2**4
                break
            for prime3 in sorted_primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
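    # Sanity check from the Project Euler 87 statement: below fifty there are
    # exactly four expressible numbers (28, 33, 47 and 49).
    assert solution(50) == 4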
| 206
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    # scale box coordinates to the 0-1000 range expected by LayoutLM-style models
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image and returns recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes


class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
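# A minimal usage sketch (illustrative only; "document.png" is a placeholder
# path, and pytesseract must be installed for the OCR branch):
#
#     from PIL import Image
#
#     image_processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     image = Image.open("document.png").convert("RGB")
#     encoding = image_processor(image, return_tensors="np")
#     print(encoding.pixel_values.shape)     # (1, 3, 224, 224)
#     print(encoding.words, encoding.boxes)  # OCR words + 0-1000 normalized boxes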
| 206
| 1
|
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
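# A minimal sketch of how these two classes fit together (hypothetical usage,
# not part of this module): the ONNX config wraps a model config and produces
# the dummy inputs an exporter would trace with.
#
#     from transformers import AutoTokenizer
#
#     config = GPTJConfig()
#     onnx_config = GPTJOnnxConfig(config, task="default")
#     tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#     dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#     print(list(onnx_config.inputs), onnx_config.default_onnx_opset)  # input names, opset 13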
| 87
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
    ClapTextConfig,
    ClapTextModelWithProjection,
    RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)

from diffusers import (
    AudioLDMPipeline,
    AutoencoderKL,
    DDIMScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=(32, 64),
            class_embed_type="simple_projection",
            projection_class_embeddings_input_dim=32,
            class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=1,
            out_channels=1,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8,
            sampling_rate=16000,
            upsample_initial_channel=16,
            upsample_rates=[2, 2],
            upsample_kernel_sizes=[4, 4],
            resblock_kernel_sizes=[3, 7],
            resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]],
            normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs

    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = audioldm_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=audioldm_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = audioldm_pipe.text_encoder(
            text_inputs,
        )
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)

        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p,
                padding="max_length",
                max_length=audioldm_pipe.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)

            text_embeds = audioldm_pipe.text_encoder(
                text_inputs,
            )
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)

            embeds.append(text_embeds)

        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds

        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]

        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) == 256

        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = "A hammer hitting a wooden surface"

        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios

        assert audios.shape == (1, 256)

        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios

        assert audios.shape == (batch_size, 256)

        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios

        assert audios.shape == (num_waveforms_per_prompt, 256)

        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios

        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate

        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016

        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]

        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        prompt = ["hey"]

        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)

        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)


@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]

        assert audio.ndim == 1
        assert len(audio) == 81920

        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
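# A minimal end-to-end sketch of the pipeline under test, mirroring the slow
# tests above (downloads the `cvssp/audioldm` checkpoint; illustrative only):
#
#     pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm").to(torch_device)
#     audio = pipe("A hammer hitting a wooden surface", num_inference_steps=10).audios[0]
#     # `audio` is a 1-D numpy waveform sampled at the vocoder's rate (16 kHz here)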
| 126
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor
from transformers.models.wav2vec2.tokenization_wav2vec2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available

from ..wav2vec2.test_feature_extraction_wav2vec2 import floats_list


if is_pyctcdecode_available():
    from huggingface_hub import snapshot_download
    from pyctcdecode import BeamSearchDecoderCTC

    from transformers.models.wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
    from transformers.models.wav2vec2_with_lm.processing_wav2vec2_with_lm import Wav2Vec2DecoderWithLMOutput


if is_torch_available():
    from transformers import Wav2Vec2ForCTC
@require_pyctcdecode
class Wav2Vec2ProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return Wav2Vec2CTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return Wav2Vec2FeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, Wav2Vec2CTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, Wav2Vec2FeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = Wav2Vec2ProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # loading with kwargs overrides the default language model parameters
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            Wav2Vec2ProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)
        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)

    @parameterized.expand([[None], ["fork"], ["spawn"]])
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)

    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits,
            beam_width=beam_width,
            beam_prune_logp=beam_prune_logp,
            token_min_logp=token_min_logp,
        )
        decoded_processor_text = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
                beam_width=beam_width,
                beam_prune_logp=beam_prune_logp,
                token_min_logp=token_min_logp,
            )

        decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_processor_text, decoded_decoder_text)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor_text)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))

    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits,
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )
        decoded_processor_text = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha,
            beta=beta,
            unk_score_offset=unk_score_offset,
            lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool,
                logits_list,
            )

        decoded_decoder_text = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_processor_text, decoded_decoder_text)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor_text)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)

    def test_decoder_download_ignores_files(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)

    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = Wav2Vec2ProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder from hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)

    def test_processor_from_auto_processor(self):
        processor_wav2vec2 = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wav2vec2 = processor_wav2vec2(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wav2vec2.keys():
            self.assertAlmostEqual(input_wav2vec2[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wav2vec2 = processor_wav2vec2.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wav2vec2.text, decoded_auto.text)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = Wav2Vec2ProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets_integration_fast(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])

    def test_offsets_integration_fast_batch(self):
        processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2DecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])

    @slow
    @require_torch
    @require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
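# A minimal sketch of the decode path exercised above (assumes the same
# `hf-internal-testing/processor_with_lm` checkpoint the tests use; `logits`
# would come from a Wav2Vec2ForCTC forward pass with shape (time, vocab)):
#
#     processor = Wav2Vec2ProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
#     output = processor.decode(logits, output_word_offsets=True)
#     print(output.text, output["word_offsets"])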
| 369
|
"""simple docstring"""
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the BERT config
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
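# Example invocation of the script above (the file paths are hypothetical
# placeholders; only the three required flags are defined by this script):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin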
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples
    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass
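# A minimal sketch of using the same pipeline outside the test harness (the tiny
# test checkpoint and frame_sampling_rate mirror the test above; the local video
# path is a hypothetical placeholder):
#
#   from transformers import pipeline
#
#   classifier = pipeline(
#       "video-classification",
#       model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#       frame_sampling_rate=4,
#   )
#   predictions = classifier("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": "LABEL_0"}, {"score": ..., "label": "LABEL_1"}]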
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class CTRLModelTester:
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_token_type_ids=True,
        use_input_mask=True,
        use_labels=True,
        use_mc_token_ids=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_token_type_ids = use_token_type_ids
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.use_mc_token_ids = use_mc_token_ids
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.pad_token_id = self.vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        mc_token_ids = None
        if self.use_mc_token_ids:
            mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def get_config(self):
        return CTRLConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            pad_token_id=self.pad_token_id,
        )
    def create_and_check_ctrl_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLModel(config=config)
        model.to(torch_device)
        model.eval()
        model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask)
        model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(len(result.past_key_values), config.n_layer)

    def create_and_check_lm_head_model(self, config, input_ids, input_mask, head_mask, token_type_ids, *args):
        model = CTRLLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
        return config, inputs_dict

    def create_and_check_ctrl_for_sequence_classification(self, config, input_ids, head_mask, token_type_ids, *args):
        config.num_labels = self.num_labels
        model = CTRLForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
@require_torch
class CTRLModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (CTRLLMHeadModel,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": CTRLModel,
            "text-classification": CTRLForSequenceClassification,
            "text-generation": CTRLLMHeadModel,
            "zero-shot": CTRLForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = True
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
            # Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
            # `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
            # config could not be created.
            return True
        return False

    def setUp(self):
        self.model_tester = CTRLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CTRLConfig, n_embd=37)
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_ctrl_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_ctrl_model(*config_and_inputs)

    def test_ctrl_lm_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CTRLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("The model doesn't support left padding")  # and it's not used enough to be worth fixing :)
    def test_left_padding_compatibility(self):
        pass
@require_torch
class CTRLModelLanguageGenerationTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        torch.cuda.empty_cache()

    @slow
    def test_lm_generate_ctrl(self):
        model = CTRLLMHeadModel.from_pretrained("ctrl")
        model.to(torch_device)
        input_ids = torch.tensor(
            [[11_859, 0, 1_611, 8]], dtype=torch.long, device=torch_device
        )  # Legal the president is
        expected_output_ids = [
            11_859,
            0,
            1_611,
            8,
            5,
            150,
            26_449,
            2,
            19,
            348,
            469,
            3,
            2_595,
            48,
            20_740,
            246_533,
            246_533,
            19,
            30,
            5,
        ]  # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
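# A minimal greedy-generation sketch matching the integration test above (the
# "ctrl" checkpoint is large; this snippet is illustrative, not part of the suite):
#
#   from transformers import CTRLTokenizer, CTRLLMHeadModel
#
#   tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#   model = CTRLLMHeadModel.from_pretrained("ctrl")
#   input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)
#   print(tokenizer.decode(output_ids[0]))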
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1_000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            # sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
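# A minimal denoising-loop sketch with DDPMScheduler, mirroring the full-loop
# tests above (`model` stands in for any noise-prediction network; the tensor
# shape is an illustrative assumption):
#
#   import torch
#   from diffusers import DDPMScheduler
#
#   scheduler = DDPMScheduler(num_train_timesteps=1_000)
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       residual = model(sample, t)  # predicted noise
#       sample = scheduler.step(residual, t, sample).prev_sample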
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)
        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")
            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]
            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]
            # fmt: off
            expected_encoding = {
"input_ids": [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
"token_type_ids": [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
"attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequences = [
"ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
"ALBERT incorporates two parameter reduction techniques",
"The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
" embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
" vocabulary embedding.",
]
            self.assertDictEqual(encoding.data, expected_encoding)
            for expected, decoded in zip(expected_decoded_sequences, decoded_sequences):
                self.assertEqual(expected, decoded)
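# A short usage sketch of the tokenizer pair exercised above (the checkpoint
# name comes from the slow tests; the comments on the output are illustrative):
#
#   from transformers import DebertaTokenizer
#
#   tokenizer = DebertaTokenizer.from_pretrained("microsoft/deberta-base")
#   encoded = tokenizer("sequence builders", "multi-sequence build")
#   # encoded["input_ids"] wraps both segments in special tokens;
#   # encoded["token_type_ids"] marks the first segment with 0s and the second with 1s.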
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
'''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
'''tokenizer_config_file''': {
'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])
            changes_to_apply = False
            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. include the space before it
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        return token_ids_0 + [self.eos_token_id]
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)
        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
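# A minimal sketch of encoding with the fast tokenizer defined above (the
# checkpoint name comes from this module's pretrained maps; the text is
# illustrative):
#
#   from transformers import BlenderbotTokenizerFast
#
#   tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
#   ids = tokenizer("Hello, how are you?").input_ids
#   # build_inputs_with_special_tokens only appends tokenizer.eos_token_id, so
#   # the encoded sequence ends with </s> and has no prefixed <s>.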
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}
class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
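# A minimal sketch instantiating the config and its ONNX counterpart (the task
# string and keyword values are illustrative assumptions):
#
#   config = Data2VecTextConfig(vocab_size=30_522, hidden_size=768)
#   onnx_config = Data2VecTextOnnxConfig(config, task="default")
#   print(onnx_config.inputs)  # OrderedDict mapping input_ids/attention_mask to dynamic axes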
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)


def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func


def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class TensorFlowBenchmark(Benchmark):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int) -> [Memory, Optional[MemorySummary]]:
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow.")
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)
    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_forward():
            return model(input_ids, training=False)

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.")
        if self.args.fp16:
            raise NotImplementedError("Mixed precision is currently not supported.")
        has_model_class_in_config = (
            hasattr(config, "architectures")
            and isinstance(config.architectures, list)
            and len(config.architectures) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("transformers", fromlist=[model_class])
                model_cls = getattr(transformers_module, model_class)
                model = model_cls(config)
            except ImportError:
                raise ImportError(
                    f"{model_class} does not exist. If you just want to test the pretrained model, you might want to"
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`."
                )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config)
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size)

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla)
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True)[0]
            gradients = tf.gradients(loss, model.trainable_variables)
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed(self, func) -> float:
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation")
                    timeit.repeat(func, repeat=1, number=5)
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func,
                    repeat=self.args.repeat,
                    number=10,
                )
                return min(runtimes) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
    def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]:
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used."
        )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line."
                        )
                    trace = start_memory_tracing("transformers")
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`"
                    )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU."
                        )
                        memory = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU."
                        )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx)
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle)
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use)
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow."
                        )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func)
                        memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace)
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"Doesn't fit on GPU. {e}")
                return "N/A", None
'''simple docstring'''
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[Any] = """cpu"""
snake_case__ : List[str] = self.get_dummy_components()
snake_case__ : str = self.pipeline_class(**snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ )
snake_case__ : str = pipe(**snake_case_ ).images
snake_case__ : str = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
snake_case__ : Union[str, Any] = np.array(
[1.0_0_0E0_0, 5.7_1_7E-0_1, 4.7_1_7E-0_1, 1.0_0_0E0_0, 0.0_0_0E0_0, 1.0_0_0E0_0, 3.0_0_0E-0_4, 0.0_0_0E0_0, 9.0_0_0E-0_4] )
snake_case__ : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case_ , 1E-3 )
def lowerCamelCase ( self : List[str] ):
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
def lowerCamelCase ( self : Tuple ):
super().test_save_load_local(expected_max_difference=3E-3 )
def lowerCamelCase ( self : Optional[int] ):
super().test_save_load_optional_components(expected_max_difference=3E-3 )
def lowerCamelCase ( self : str ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Any ):
snake_case__ : Optional[Any] = """google/ddpm-cifar10-32"""
snake_case__ : Optional[Any] = UNet2DModel.from_pretrained(snake_case_ )
snake_case__ : List[Any] = DDIMScheduler()
snake_case__ : List[str] = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ )
ddim.to(snake_case_ )
ddim.set_progress_bar_config(disable=snake_case_ )
snake_case__ : Optional[Any] = torch.manual_seed(0 )
snake_case__ : Optional[int] = ddim(generator=snake_case_ , eta=0.0 , output_type="""numpy""" ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
snake_case__ : Optional[Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Dict = """google/ddpm-ema-bedroom-256"""
snake_case__ : Dict = UNet2DModel.from_pretrained(snake_case_ )
snake_case__ : Optional[int] = DDIMScheduler.from_pretrained(snake_case_ )
snake_case__ : Tuple = DDIMPipeline(unet=snake_case_ , scheduler=snake_case_ )
ddpm.to(snake_case_ )
ddpm.set_progress_bar_config(disable=snake_case_ )
snake_case__ : List[Any] = torch.manual_seed(0 )
snake_case__ : int = ddpm(generator=snake_case_ , output_type="""numpy""" ).images
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
snake_case__ : Optional[Any] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
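# Added usage sketch (hedged): not part of the tests above. It mirrors the slow
# CIFAR-10 test using only public diffusers APIs; the step count and PIL output
# type are assumptions for a quick local run.
def _ddim_usage_sketch():
    unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
    pipe = DDIMPipeline(unet=unet, scheduler=DDIMScheduler())
    generator = torch.manual_seed(0)
    # eta=0.0 makes DDIM sampling deterministic given the fixed generator above.
    return pipe(generator=generator, eta=0.0, num_inference_steps=50, output_type="pil").images[0]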
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class UpperCAmelCase_ ( _a ):
"""simple docstring"""
def __init__( self : Any ):
snake_case__ : int = []
def lowerCamelCase ( self : Optional[int] , snake_case_ : List[str] , snake_case_ : Tuple , snake_case_ : Any , **snake_case_ : str ):
self.events.append("""on_init_end""" )
def lowerCamelCase ( self : List[str] , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : List[Any] , **snake_case_ : List[str] ):
self.events.append("""on_train_begin""" )
def lowerCamelCase ( self : Dict , snake_case_ : Dict , snake_case_ : str , snake_case_ : int , **snake_case_ : str ):
self.events.append("""on_train_end""" )
def lowerCamelCase ( self : List[str] , snake_case_ : int , snake_case_ : List[Any] , snake_case_ : List[str] , **snake_case_ : int ):
self.events.append("""on_epoch_begin""" )
def lowerCamelCase ( self : List[Any] , snake_case_ : str , snake_case_ : List[str] , snake_case_ : List[Any] , **snake_case_ : Union[str, Any] ):
self.events.append("""on_epoch_end""" )
def lowerCamelCase ( self : Tuple , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , **snake_case_ : str ):
self.events.append("""on_step_begin""" )
def lowerCamelCase ( self : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] , snake_case_ : Any , **snake_case_ : Optional[Any] ):
self.events.append("""on_step_end""" )
def lowerCamelCase ( self : List[str] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : int , **snake_case_ : List[Any] ):
self.events.append("""on_evaluate""" )
def lowerCamelCase ( self : int , snake_case_ : Any , snake_case_ : List[str] , snake_case_ : Optional[int] , **snake_case_ : Any ):
self.events.append("""on_predict""" )
def lowerCamelCase ( self : int , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Dict , **snake_case_ : str ):
self.events.append("""on_save""" )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] , snake_case_ : int , **snake_case_ : Optional[int] ):
self.events.append("""on_log""" )
def lowerCamelCase ( self : Any , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[int] , **snake_case_ : Tuple ):
self.events.append("""on_prediction_step""" )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase ( self : Tuple ):
snake_case__ : List[Any] = tempfile.mkdtemp()
def lowerCamelCase ( self : List[Any] ):
shutil.rmtree(self.output_dir )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : List[Any]=0 , snake_case_ : List[Any]=0 , snake_case_ : List[str]=64 , snake_case_ : Optional[Any]=64 , snake_case_ : List[Any]=None , snake_case_ : Optional[int]=False , **snake_case_ : int ):
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
snake_case__ : Optional[int] = RegressionDataset(length=snake_case_ )
snake_case__ : Dict = RegressionDataset(length=snake_case_ )
snake_case__ : Any = RegressionModelConfig(a=snake_case_ , b=snake_case_ )
snake_case__ : str = RegressionPreTrainedModel(snake_case_ )
snake_case__ : Any = TrainingArguments(self.output_dir , disable_tqdm=snake_case_ , report_to=[] , **snake_case_ )
return Trainer(
snake_case_ , snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , callbacks=snake_case_ , )
def lowerCamelCase ( self : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[int] ):
self.assertEqual(len(snake_case_ ) , len(snake_case_ ) )
# Order doesn't matter
snake_case__ : int = sorted(snake_case_ , key=lambda snake_case_ : cb.__name__ if isinstance(snake_case_ , snake_case_ ) else cb.__class__.__name__ )
snake_case__ : Optional[int] = sorted(snake_case_ , key=lambda snake_case_ : cb.__name__ if isinstance(snake_case_ , snake_case_ ) else cb.__class__.__name__ )
for cba, cba in zip(snake_case_ , snake_case_ ):
if isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , snake_case_ )
elif isinstance(snake_case_ , snake_case_ ) and not isinstance(snake_case_ , snake_case_ ):
self.assertEqual(snake_case_ , cba.__class__ )
elif not isinstance(snake_case_ , snake_case_ ) and isinstance(snake_case_ , snake_case_ ):
self.assertEqual(cba.__class__ , snake_case_ )
else:
self.assertEqual(snake_case_ , snake_case_ )
def lowerCamelCase ( self : str , snake_case_ : Optional[Any] ):
snake_case__ : Optional[Any] = ["""on_init_end""", """on_train_begin"""]
snake_case__ : Optional[int] = 0
snake_case__ : Any = len(trainer.get_eval_dataloader() )
snake_case__ : Optional[int] = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append("""on_epoch_begin""" )
for _ in range(snake_case_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append("""on_log""" )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append("""on_save""" )
expected_events.append("""on_epoch_end""" )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Tuple = self.get_trainer()
snake_case__ : Any = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# Callbacks passed at init are added to the default callbacks
snake_case__ : List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
snake_case__ : int = self.get_trainer(disable_tqdm=snake_case_ )
snake_case__ : Union[str, Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : Optional[int] ):
snake_case__ : Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
snake_case__ : Any = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : List[Any] = self.get_trainer()
snake_case__ : List[Any] = trainer.pop_callback(snake_case_ )
self.assertEqual(cb.__class__ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
# We can also add, pop, or remove by instance
snake_case__ : Optional[Any] = self.get_trainer()
snake_case__ : Any = trainer.callback_handler.callbacks[0]
trainer.remove_callback(snake_case_ )
expected_callbacks.remove(snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
snake_case__ : Any = self.get_trainer()
snake_case__ : Dict = trainer.callback_handler.callbacks[0]
snake_case__ : Optional[int] = trainer.pop_callback(snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
trainer.add_callback(snake_case_ )
expected_callbacks.insert(0 , snake_case_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , snake_case_ )
def lowerCamelCase ( self : str ):
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action="""ignore""" , category=snake_case_ )
snake_case__ : str = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# Independent log/save/eval
snake_case__ : Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
snake_case__ : str = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : Tuple = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
snake_case__ : Dict = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" )
trainer.train()
snake_case__ : Optional[Any] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
snake_case__ : List[str] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# A bit of everything
snake_case__ : Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , )
trainer.train()
snake_case__ : int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(snake_case_ , self.get_expected_events(snake_case_ ) )
# warning should be emitted for duplicated callbacks
with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock:
snake_case__ : Optional[Any] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(snake_case_ ) in warn_mock.call_args[0][0]
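# Added sketch (hedged): a minimal real-world callback analogous to
# MyTestTrainerCallback above, using only the public TrainerCallback hooks.
class StepPrinterCallback(TrainerCallback):
    """Prints the global step after every optimization step."""

    def on_step_end(self, args, state, control, **kwargs):
        # state.global_step is maintained by the Trainer training loop.
        print(f"finished step {state.global_step}")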
import math
def is_prime( number ):
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution( ratio = 0.1 ):
    """simple docstring"""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
            primes += is_prime(i )
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
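    # Added sketch (hedged): quick sanity checks for the helpers above; the loose
    # 50% ratio keeps the spiral search fast enough for a smoke test.
    assert is_prime(13) and not is_prime(15)
    print(f"first side length below a 50% diagonal prime ratio: {solution(0.5)}")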
"""simple docstring"""
from __future__ import annotations
def depth_first_search( graph: dict , start: str ) -> set:
    """simple docstring"""
    explored , stack = set(start ), [start]
    while stack:
        v = stack.pop()
        explored.add(v )
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v] ):
            if adj not in explored:
                stack.append(adj )
    return explored
G = {
    '''A''': ['''B''', '''C''', '''D'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F'''],
    '''D''': ['''B''', '''D'''],
    '''E''': ['''B''', '''F'''],
    '''F''': ['''C''', '''E''', '''G'''],
    '''G''': ['''F'''],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, '''A'''))
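# Added sketch (hedged): the breadth-first counterpart referenced in the comments
# above; the only changes are popping from the front of a queue and no reversal.
def breadth_first_search(graph: dict, start: str) -> set:
    from collections import deque

    explored, queue = set(start), deque([start])
    while queue:
        v = queue.popleft()
        explored.add(v)
        for adj in graph[v]:
            if adj not in explored:
                queue.append(adj)
    return explored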
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __lowercase ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase = None , ) -> List[str]:
'''simple docstring'''
_A = {}
if train_file is not None:
_A = [train_file]
if eval_file is not None:
_A = [eval_file]
if test_file is not None:
_A = [test_file]
_A = datasets.load_dataset("csv" , data_files=__lowercase )
_A = list(ds[list(files.keys() )[0]].features.keys() )
_A = features_name.pop(__lowercase )
_A = list(set(ds[list(files.keys() )[0]][label_name] ) )
_A = {label: i for i, label in enumerate(__lowercase )}
_A = tokenizer.model_input_names
_A = {}
if len(__lowercase ) == 1:
for k in files.keys():
_A = ds[k].map(
lambda __lowercase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__lowercase , max_length=__lowercase , padding="max_length" ) , batched=__lowercase , )
elif len(__lowercase ) == 2:
for k in files.keys():
_A = ds[k].map(
lambda __lowercase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__lowercase , max_length=__lowercase , padding="max_length" , ) , batched=__lowercase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_A = {k: v for k, v in ex.items() if k in input_names}
_A = labelaid[ex[label_name]]
yield (d, label)
_A = (
tf.data.Dataset.from_generator(
__lowercase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_A = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_A = (
tf.data.Dataset.from_generator(
__lowercase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_A = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_A = (
tf.data.Dataset.from_generator(
__lowercase , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_A = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
lowerCamelCase_ = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
snake_case = field(metadata={'''help''': '''Which column contains the label'''} )
snake_case = field(default=snake_case_ , metadata={'''help''': '''The path of the training file'''} )
snake_case = field(default=snake_case_ , metadata={'''help''': '''The path of the development file'''} )
snake_case = field(default=snake_case_ , metadata={'''help''': '''The path of the test file'''} )
snake_case = field(
default=1_28 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
@dataclass
class _UpperCAmelCase :
"""simple docstring"""
snake_case = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
snake_case = field(default=snake_case_ , metadata={'''help''': '''Set this flag to use fast tokenization.'''} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
snake_case = field(
default=snake_case_ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
def __lowercase ( ) -> Dict:
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_A , _A , _A = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A , _A , _A , _A = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__lowercase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__lowercase ) , labelaid=__lowercase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_A = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , )
def compute_metrics(__lowercase ) -> Dict:
_A = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_A = TFTrainer(
model=__lowercase , args=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , compute_metrics=__lowercase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_A = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_A = trainer.evaluate()
_A = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(__lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(__lowercase )
return results
if __name__ == "__main__":
main()
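# Added usage sketch (hedged): a typical invocation of this script (filename
# assumed). Flag names come from the dataclasses and get_tfds() call above;
# model name and file paths are placeholders.
#
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --test_file test.csv \
#       --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval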
'''simple docstring'''
import operator
def strand_sort( arr , reverse = False , solution = None ) -> list:
    '''simple docstring'''
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []
    if not arr:
        return solution
    sublist = [arr.pop(0 )]
    for i, item in enumerate(arr ):
        if _operator(item , sublist[-1] ):
            sublist.append(item )
            arr.pop(i )
    # merging sublist into solution list
    if not solution:
        solution.extend(sublist )
    else:
        while sublist:
            item = sublist.pop(0 )
            for i, xx in enumerate(solution ):
                if not _operator(item , xx ):
                    solution.insert(i , item )
                    break
            else:
                solution.append(item )
    strand_sort(arr , reverse , solution )
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
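    # Added sketch (hedged): the sort handles any mutually comparable items, e.g.
    # characters; list() is used because strand_sort consumes its input in place.
    assert strand_sort(list("strand")) == ["a", "d", "n", "r", "s", "t"]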
from sklearn.metrics import f1_score
import datasets
lowerCAmelCase_ = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
lowerCAmelCase_ = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
lowerCAmelCase_ = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
'''simple docstring'''
def snake_case__( self : Tuple ) ->Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
def snake_case__( self : List[str] , _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Optional[int]=1 , _UpperCamelCase : List[str]="binary" , _UpperCamelCase : Tuple=None ) ->Optional[Any]:
snake_case_ = f1_score(
_UpperCamelCase , _UpperCamelCase , labels=_UpperCamelCase , pos_label=_UpperCamelCase , average=_UpperCamelCase , sample_weight=_UpperCamelCase )
return {"f1": float(_UpperCamelCase ) if score.size == 1 else score}
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCamelCase : Tuple = {'configuration_unispeech': ['UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP', 'UniSpeechConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Any = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_lowerCamelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowerCamelCase : Dict = 'src/diffusers'
_lowerCamelCase : Dict = '.'
# This is to make sure the diffusers module imported is the one in the repo.
_lowerCamelCase : List[str] = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowerCamelCase : Tuple = spec.loader.load_module()
def __a ( UpperCAmelCase , UpperCAmelCase ) ->Union[str, Any]:
"""simple docstring"""
return line.startswith(UpperCAmelCase ) or len(UpperCAmelCase ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , UpperCAmelCase ) is not None
def __a ( UpperCAmelCase ) ->Dict:
"""simple docstring"""
A = object_name.split(""".""" )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(UpperCAmelCase ) and not os.path.isfile(os.path.join(UpperCAmelCase , f"""{module}.py""" ) ):
i += 1
if i < len(UpperCAmelCase ):
A = os.path.join(UpperCAmelCase , parts[i] )
if i >= len(UpperCAmelCase ):
raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
with open(os.path.join(UpperCAmelCase , f"""{module}.py""" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = """"""
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(UpperCAmelCase ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(UpperCAmelCase ):
raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(UpperCAmelCase ) and _should_continue(lines[line_index] , UpperCAmelCase ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(UpperCAmelCase )
_lowerCamelCase : str = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_lowerCamelCase : Any = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_lowerCamelCase : str = re.compile(R'<FILL\s+[^>]*>')
def __a ( UpperCAmelCase ) ->str:
"""simple docstring"""
A = code.split("""\n""" )
A = 0
while idx < len(UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(UpperCAmelCase ):
return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
return ""
def __a ( UpperCAmelCase ) ->Optional[int]:
"""simple docstring"""
A = len(get_indent(UpperCAmelCase ) ) > 0
if has_indent:
A = f"""class Bla:\n{code}"""
A = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=119 , preview=UpperCAmelCase )
A = black.format_str(UpperCAmelCase , mode=UpperCAmelCase )
A , A = style_docstrings_in_code(UpperCAmelCase )
return result[len("""class Bla:\n""" ) :] if has_indent else result
def __a ( UpperCAmelCase , UpperCAmelCase=False ) ->List[str]:
"""simple docstring"""
with open(UpperCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(UpperCAmelCase ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(UpperCAmelCase )
A = get_indent(UpperCAmelCase )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(UpperCAmelCase ) and should_continue:
line_index += 1
if line_index >= len(UpperCAmelCase ):
break
A = lines[line_index]
A = _should_continue(UpperCAmelCase , UpperCAmelCase ) and re.search(f"""^{indent}# End copy""" , UpperCAmelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = """""".join(UpperCAmelCase )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(UpperCAmelCase ) is None]
A = """\n""".join(UpperCAmelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(UpperCAmelCase ) > 0:
A = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A = [_re_replace_pattern.search(UpperCAmelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , UpperCAmelCase )
A = re.sub(obja.upper() , obja.upper() , UpperCAmelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(UpperCAmelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(UpperCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase )
return diffs
def __a ( UpperCAmelCase = False ) ->int:
"""simple docstring"""
A = glob.glob(os.path.join(UpperCAmelCase , """**/*.py""" ) , recursive=UpperCAmelCase )
A = []
for filename in all_files:
A = is_copy_consistent(UpperCAmelCase , UpperCAmelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(UpperCAmelCase ) > 0:
A = """\n""".join(UpperCAmelCase )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
_lowerCamelCase : Any = parser.parse_args()
check_copies(args.fix_and_overwrite)
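# Added sketch (hedged): the comment syntax this script enforces. A copied object
# declares its origin, optionally with "with old->new" rename patterns, e.g.
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock
#   # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler with DDPM->DDIM
#
# Running `python utils/check_copies.py --fix_and_overwrite` then rewrites any
# copy that has drifted from its source.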
def print_pascal_triangle(num_rows: int ) -> None:
    """simple docstring"""
    triangle = generate_pascal_triangle(num_rows )
    for row_idx in range(num_rows ):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1 ):
            print(end=''' ''' )
        # Print row values
        for col_idx in range(row_idx + 1 ):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx] , end=''' ''' )
            else:
                print(triangle[row_idx][col_idx] , end='''''' )
        print()
def generate_pascal_triangle(num_rows: int ) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    triangle = []
    for current_row_idx in range(num_rows ):
        current_row = populate_current_row(triangle , current_row_idx )
        triangle.append(current_row )
    return triangle
def populate_current_row(triangle: list[list[int]] , current_row_idx: int ) -> list[int]:
    """simple docstring"""
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0] , current_row[-1] = 1, 1
    for current_col_idx in range(1 , current_row_idx ):
        calculate_current_element(
            triangle , current_row , current_row_idx , current_col_idx )
    return current_row
def calculate_current_element(triangle: list[list[int]] , current_row: list[int] , current_row_idx: int , current_col_idx: int , ) -> None:
    """simple docstring"""
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int ) -> list[list[int]]:
    """simple docstring"""
    if not isinstance(num_rows , int ):
        raise TypeError('''The input value of \'num_rows\' should be \'int\'''' )
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            '''The input value of \'num_rows\' should be greater than or equal to 0''' )
    result = [[1]]
    for row_index in range(1 , num_rows ):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length , 2 ) )
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 )
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row_to_append = row_first_half + row_second_half
        result.append(row_to_append )
    return result
def benchmark() -> None:
    """simple docstring"""
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(F"""{call:38} -- {timing:.4f} seconds""" )
    for value in range(15 ): # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
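# Added sketch (hedged): the expected stdout of print_pascal_triangle(3):
#
#     1
#    1 1
#   1 2 1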
'''simple docstring'''
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ):
lowercase = LongformerTokenizer
lowercase = True
lowercase = LongformerTokenizerFast
lowercase = True
def _lowercase( self ) -> List[Any]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCAmelCase : List[str] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) )
UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""}
UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(A ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(A ) )
def _lowercase( self , **A ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , **A ) -> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A )
def _lowercase( self , A ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = """lower newer"""
UpperCAmelCase : Optional[int] = """lower newer"""
return input_text, output_text
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
UpperCAmelCase : Dict = """lower newer"""
UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True)
self.assertListEqual(A , A )
UpperCAmelCase : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : str = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("""Hello world! cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" )
UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : List[str] = tokenizer.encode(
"""sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A )
UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase( self ) -> List[Any]:
UpperCAmelCase : str = self.get_tokenizer()
UpperCAmelCase : List[Any] = """Encode this sequence."""
UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]]
# Testing encoder arguments
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A )
UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(A , A )
tokenizer.add_special_tokens({"""bos_token""": """<s>"""} )
UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A )
UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(A , A )
# Testing spaces after special tokens
UpperCAmelCase : Union[str, Any] = """<mask>"""
tokenizer.add_special_tokens(
{"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space
UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A )
UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence"""
UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence"""
UpperCAmelCase : Union[str, Any] = tokenizer.encode(A )
UpperCAmelCase : Union[str, Any] = encoded.index(A )
UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(A , A )
UpperCAmelCase : Tuple = tokenizer.encode(A )
UpperCAmelCase : Optional[int] = encoded.index(A )
UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(A , A )
def _lowercase( self ) -> Optional[int]:
pass
def _lowercase( self ) -> Any:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A )
UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence."""
UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
def _lowercase( self ) -> List[Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""add_prefix_space"""] , A )
self.assertEqual(post_processor_state["""trim_offsets"""] , A )
def _lowercase( self ) -> Optional[Any]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}'''
UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , )
UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[Any] = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
A , use_fast=A , add_prefix_space=A , trim_offsets=A )
UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowercase = {
"""configuration_resnet""": ["""RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ResNetConfig""", """ResNetOnnxConfig"""]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ResNetForImageClassification""",
"""ResNetModel""",
"""ResNetPreTrainedModel""",
"""ResNetBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFResNetForImageClassification""",
"""TFResNetModel""",
"""TFResNetPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""FlaxResNetForImageClassification""",
"""FlaxResNetModel""",
"""FlaxResNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_resnet import (
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
ResNetBackbone,
ResNetForImageClassification,
ResNetModel,
ResNetPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_resnet import (
TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFResNetForImageClassification,
TFResNetModel,
TFResNetPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowercase = {"""configuration_deit""": ["""DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DeiTConfig""", """DeiTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowercase["""feature_extraction_deit"""] = ["""DeiTFeatureExtractor"""]
    _lowercase["""image_processing_deit"""] = ["""DeiTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowercase["""modeling_deit"""] = [
"""DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DeiTForImageClassification""",
"""DeiTForImageClassificationWithTeacher""",
"""DeiTForMaskedImageModeling""",
"""DeiTModel""",
"""DeiTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _lowercase["""modeling_tf_deit"""] = [
"""TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDeiTForImageClassification""",
"""TFDeiTForImageClassificationWithTeacher""",
"""TFDeiTForMaskedImageModeling""",
"""TFDeiTModel""",
"""TFDeiTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _lowercase, module_spec=__spec__)
| 229
| 1
|
__lowercase = {
'''Pillow''': '''Pillow''',
'''accelerate''': '''accelerate>=0.11.0''',
'''compel''': '''compel==0.1.8''',
'''black''': '''black~=23.1''',
'''datasets''': '''datasets''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.13.2''',
'''requests-mock''': '''requests-mock==1.10.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''invisible-watermark''': '''invisible-watermark''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2''',
'''jaxlib''': '''jaxlib>=0.1.65''',
'''Jinja2''': '''Jinja2''',
'''k-diffusion''': '''k-diffusion>=0.0.12''',
'''torchsde''': '''torchsde''',
'''note_seq''': '''note_seq''',
'''librosa''': '''librosa''',
'''numpy''': '''numpy''',
'''omegaconf''': '''omegaconf''',
'''parameterized''': '''parameterized''',
'''protobuf''': '''protobuf>=3.20.3,<4''',
'''pytest''': '''pytest''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''ruff''': '''ruff>=0.0.241''',
'''safetensors''': '''safetensors''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''scipy''': '''scipy''',
'''onnx''': '''onnx''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''tensorboard''': '''tensorboard''',
'''torch''': '''torch>=1.4''',
'''torchvision''': '''torchvision''',
'''transformers''': '''transformers>=4.25.1''',
'''urllib3''': '''urllib3<=2.0.0''',
}
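# Added note (assumption about the surrounding repo): a table like this is
# typically consumed by setup.py so that every version pin lives in one place,
# e.g. install_requires entries are looked up as deps["torch"], deps["numpy"],
# and so on.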
| 43
|
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # drop a box if its height or width falls below this scale
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ''''''
IMG_DIR = ''''''
OUTPUT_DIR = ''''''
NUMBER_IMAGES = 250


def main():
    '''Create NUMBER_IMAGES mosaic images with rescaled YOLO annotations.'''
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths, annos, idxs, OUTPUT_SIZE, SCALE_RANGE, filter_scale=FILTER_TINY_SCALE,
        )
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit('''.''', 1)[0]
        file_root = f"""{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"""
        cv2.imwrite(f"""{file_root}.jpg""", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"""Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}""")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"""{anno[0]} {x_center} {y_center} {width} {height}"""
            annos_list.append(obj)
        with open(f"""{file_root}.txt""", '''w''') as outfile:
            outfile.write('''\n'''.join(line for line in annos_list))


def get_dataset(label_dir, img_dir):
    '''Read YOLO-format label files; return image paths and (class, xmin, ymin, xmax, ymax) boxes.'''
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, '''*.txt''')):
        label_name = label_file.split(os.sep)[-1].rsplit('''.''', 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"""{label_name}.jpg""")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip('''\n''').split(''' ''')
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2
            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels


def update_image_and_anno(all_img_list, all_annos, idxs, output_size, scale_range, filter_scale=0.0):
    '''Tile four images into one mosaic and rescale their annotations to the new canvas.'''
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])
    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
    # Remove bounding boxes smaller than the filter scale
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]
    return output_img, new_anno, path_list[0]


def random_chars(number_char):
    '''Generate a random lowercase alphanumeric string of length `number_char`.'''
    assert number_char > 1, '''The number of characters should be greater than 1'''
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('''DONE ✅''')
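# Usage sketch (added; the empty path constants above are placeholders): point
# LABEL_DIR at a folder of YOLO-format .txt label files and IMG_DIR at the
# matching .jpg images, then run this script to write NUMBER_IMAGES four-image
# mosaics plus rescaled label files into OUTPUT_DIR.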
| 43
| 1
|
def lowercase ( n : int = 100 ) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
    print(F'''{lowercase() = }''')
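# A minimal closed-form sketch (added for illustration, not part of the
# original solution): using sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6
# gives the same answer in O(1) instead of the O(n) loop above.
def closed_form_solution(n: int = 100) -> int:
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares

assert closed_form_solution(10) == 2640  # 55**2 - 385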
| 370
|
"""simple docstring"""
from math import factorial, radians
def lowercase ( angle_in_degrees : float , accuracy : int = 18 , rounded_values_count : int = 10 ) -> float:
    # Simplify the angle to fall within one full turn
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("doctest").testmod()
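# Quick sanity checks (added): with the default 18 series terms the Maclaurin
# approximation above recovers exact sine values to 10 decimal places.
assert lowercase(30) == 0.5
assert lowercase(90) == 1.0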
| 11
| 0
|
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_UpperCAmelCase : Optional[int] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_UpperCAmelCase : str = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 174
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_UpperCAmelCase : Any = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _UpperCAmelCase["""modeling_trocr"""] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _UpperCAmelCase, module_spec=__spec__)
| 174
| 1
|
'''simple docstring'''
def __lowerCamelCase ( number : int ) -> bool:
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
while number > 0:
if number % 1_0 != number_square % 1_0:
return False
number //= 1_0
number_square //= 1_0
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
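# Illustrative examples (added): the predicate above tests for automorphic
# numbers, i.e. numbers whose square ends in the number itself.
assert __lowerCamelCase(5) is True  # 5**2 = 25 ends in 5
assert __lowerCamelCase(25) is True  # 25**2 = 625 ends in 25
assert __lowerCamelCase(7) is False  # 7**2 = 49 does not end in 7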
| 338
|
'''simple docstring'''
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCamelCase_ ( IterableDataset ):
def __init__( self , A=0.0_1 , A=1000 ) -> List[str]:
UpperCAmelCase : List[Any] = p_stop
UpperCAmelCase : Optional[int] = max_length
def __iter__( self ) -> Union[str, Any]:
UpperCAmelCase : Dict = 0
UpperCAmelCase : Union[str, Any] = False
while not stop and count < self.max_length:
yield count
count += 1
UpperCAmelCase : Any = random.random() < self.p_stop
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self , A , A , A=False , A=True ) -> Union[str, Any]:
UpperCAmelCase : List[str] = [
            BatchSamplerShard(A , 2 , i , split_batches=A , even_batches=A )
for i in range(2 )
]
UpperCAmelCase : List[str] = [list(A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A ) for shard in batch_sampler_shards] , [len(A ) for e in expected] )
self.assertListEqual(A , A )
def _lowercase( self ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase : int = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
UpperCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
UpperCAmelCase : Optional[int] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
UpperCAmelCase : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
UpperCAmelCase : int = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
UpperCAmelCase : Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
UpperCAmelCase : Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Union[str, Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
UpperCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : List[Any] = [[], []]
self.check_batch_sampler_shards(A , A )
def _lowercase( self ) -> Tuple:
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
UpperCAmelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
UpperCAmelCase : Union[str, Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
UpperCAmelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
UpperCAmelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Optional[Any] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
UpperCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Any = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def _lowercase( self ) -> Any:
# Check the shards when the dataset is a round multiple of total batch size.
UpperCAmelCase : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
UpperCAmelCase : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
UpperCAmelCase : Optional[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
UpperCAmelCase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
        # Check the shards when the dataset is not a round multiple of batch size but the number of
        # batches is a multiple of num_processes.
UpperCAmelCase : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
UpperCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
        # Check the shards when the dataset is not a round multiple of batch size and the number of
        # batches is not a multiple of num_processes.
UpperCAmelCase : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
UpperCAmelCase : Optional[int] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
UpperCAmelCase : Dict = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : str = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
UpperCAmelCase : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
UpperCAmelCase : Tuple = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def _lowercase( self ) -> List[Any]:
# Check the shards when the dataset is a round multiple of batch size.
UpperCAmelCase : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
UpperCAmelCase : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
UpperCAmelCase : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
UpperCAmelCase : Dict = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
UpperCAmelCase : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
UpperCAmelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
UpperCAmelCase : str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Dict = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
UpperCAmelCase : Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        UpperCAmelCase : List[str] = [BatchSamplerShard(A , 2 , i , even_batches=A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def _lowercase( self , A , A , A , A=False , A=2 , A=False ) -> Tuple:
random.seed(A )
UpperCAmelCase : Dict = list(A )
UpperCAmelCase : Any = [
IterableDatasetShard(
                A , batch_size=A , drop_last=A , num_processes=A , process_index=i , split_batches=A , )
for i in range(A )
]
UpperCAmelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A )
iterable_dataset_lists.append(list(A ) )
UpperCAmelCase : Optional[Any] = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shards should have the same length, a round multiple of shard_batch_size
UpperCAmelCase : List[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(len(A ) % shard_batch_size == 0 )
UpperCAmelCase : List[Any] = []
for idx in range(0 , len(A ) , A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A ) < len(A ):
reference += reference
self.assertListEqual(A , reference[: len(A )] )
def _lowercase( self ) -> str:
UpperCAmelCase : Tuple = 42
UpperCAmelCase : List[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
# Edge case with a very small dataset
UpperCAmelCase : List[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
def _lowercase( self ) -> Tuple:
UpperCAmelCase : Dict = BatchSampler(range(16 ) , batch_size=4 , drop_last=A )
UpperCAmelCase : Any = SkipBatchSampler(A , 2 )
self.assertListEqual(list(A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowercase( self ) -> int:
UpperCAmelCase : Any = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowercase( self ) -> Union[str, Any]:
UpperCAmelCase : List[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
UpperCAmelCase : Optional[Any] = skip_first_batches(A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[int] = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def _lowercase( self ) -> Dict:
Accelerator()
UpperCAmelCase : Union[str, Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
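# Minimal usage sketch (added, mirroring the tests above; the positional
# signature is assumed from the calls in check_batch_sampler_shards):
# sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
# shard_0 = BatchSamplerShard(sampler, 2, 0)  # num_processes=2, process_index=0
# list(shard_0)  # -> [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]]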
| 338
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    __a['''modeling_unispeech'''] = [
'''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''UniSpeechForCTC''',
'''UniSpeechForPreTraining''',
'''UniSpeechForSequenceClassification''',
'''UniSpeechModel''',
'''UniSpeechPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], __a, module_spec=__spec__)
| 337
|
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no previously placed queen attacks (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, printing and recording each complete solution."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with Q for queens and . for empty squares."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('''Q''', end=''' ''')
            else:
                print('''.''', end=''' ''')
        print()


# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total number of solutions is:''', len(solution))
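# Added note: for n = 8 the search above prints all 92 solutions. Caveat: each
# append stores a reference to the same mutable board, so after the search
# `solution` holds 92 identical (now zeroed) boards; only the printed output
# and the count remain meaningful.
assert len(solution) == 92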
| 337
| 1
|
"""simple docstring"""
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE : List[Any] = TypeVar("""KT""")
SCREAMING_SNAKE_CASE : Optional[int] = TypeVar("""VT""")
class _UpperCAmelCase ( Generic[KT, VT] ):
'''simple docstring'''
def __init__(self , a_ = "root" , a_ = None ):
'''simple docstring'''
__snake_case : List[Any] = key
__snake_case : Tuple = value
__snake_case : list[Node[KT, VT]] = []
def __repr__(self ):
'''simple docstring'''
return f"""Node({self.key}: {self.value})"""
@property
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return len(self.forward )
class _UpperCAmelCase ( Generic[KT, VT] ):
'''simple docstring'''
def __init__(self , a_ = 0.5 , a_ = 16 ):
'''simple docstring'''
__snake_case : Node[KT, VT] = Node[KT, VT]()
__snake_case : List[str] = 0
__snake_case : List[str] = p
__snake_case : str = max_level
def __str__(self ):
'''simple docstring'''
__snake_case : int = list(self )
if len(snake_case__ ) == 0:
return f"""SkipList(level={self.level})"""
__snake_case : Any = max((len(str(snake_case__ ) ) for item in items) , default=4 )
__snake_case : List[str] = max(snake_case__ , 4 ) + 4
__snake_case : Union[str, Any] = self.head
__snake_case : int = []
__snake_case : Any = node.forward.copy()
lines.append(f"""[{node.key}]""".ljust(snake_case__ , '''-''' ) + '''* ''' * len(snake_case__ ) )
lines.append(''' ''' * label_size + '''| ''' * len(snake_case__ ) )
while len(node.forward ) != 0:
__snake_case : Union[str, Any] = node.forward[0]
lines.append(
f"""[{node.key}]""".ljust(snake_case__ , '''-''' )
+ ''' '''.join(str(n.key ) if n.key == node.key else '''|''' for n in forwards ) )
lines.append(''' ''' * label_size + '''| ''' * len(snake_case__ ) )
__snake_case : Union[str, Any] = node.forward
lines.append('''None'''.ljust(snake_case__ ) + '''* ''' * len(snake_case__ ) )
return f"""SkipList(level={self.level})\n""" + "\n".join(snake_case__ )
def __iter__(self ):
'''simple docstring'''
__snake_case : Optional[int] = self.head
while len(node.forward ) != 0:
yield node.forward[0].key
__snake_case : List[Any] = node.forward[0]
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = 1
while random() < self.p and level < self.max_level:
level += 1
return level
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = []
__snake_case : Optional[int] = self.head
for i in reversed(range(self.level ) ):
            # i < node.level - when the node's level is smaller than `i`, decrement `i`.
# node.forward[i].key < key - Jumping to node with key value higher
# or equal to searched key would result
# in skipping searched key.
while i < node.level and node.forward[i].key < key:
__snake_case : Any = node.forward[i]
# Each leftmost node (relative to searched node) will potentially have to
# be updated.
update_vector.append(snake_case__ )
update_vector.reverse() # Note that we were inserting values in reverse order.
# len(node.forward) != 0 - If current node doesn't contain any further
# references then searched key is not present.
# node.forward[0].key == key - Next node key should be equal to search key
# if key is present.
if len(node.forward ) != 0 and node.forward[0].key == key:
return node.forward[0], update_vector
else:
return None, update_vector
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : Optional[Any] = self._locate_node(snake_case__ )
if node is not None:
for i, update_node in enumerate(snake_case__ ):
# Remove or replace all references to removed node.
if update_node.level > i and update_node.forward[i].key == key:
if node.level > i:
__snake_case : Optional[int] = node.forward[i]
else:
__snake_case : int = update_node.forward[:i]
def SCREAMING_SNAKE_CASE (self , a_ , a_ ):
'''simple docstring'''
__snake_case : int = self._locate_node(snake_case__ )
if node is not None:
__snake_case : Any = value
else:
__snake_case : Dict = self.random_level()
if level > self.level:
# After level increase we have to add additional nodes to head.
for _ in range(self.level - 1 , snake_case__ ):
update_vector.append(self.head )
__snake_case : Optional[int] = level
__snake_case : List[str] = Node(snake_case__ , snake_case__ )
for i, update_node in enumerate(update_vector[:level] ):
# Change references to pass through new node.
if update_node.level > i:
new_node.forward.append(update_node.forward[i] )
if update_node.level < i + 1:
update_node.forward.append(snake_case__ )
else:
__snake_case : Union[str, Any] = new_node
def SCREAMING_SNAKE_CASE (self , a_ ):
'''simple docstring'''
__snake_case : str = self._locate_node(snake_case__ )
if node is not None:
return node.value
return None
def lowercase ( ) ->List[str]:
"""simple docstring"""
__snake_case : Any = SkipList()
skip_list.insert('''Key1''' , 3 )
skip_list.insert('''Key2''' , 12 )
skip_list.insert('''Key3''' , 41 )
skip_list.insert('''Key4''' , -19 )
__snake_case : Optional[int] = skip_list.head
__snake_case : Any = {}
while node.level != 0:
__snake_case : List[str] = node.forward[0]
__snake_case : int = node.value
assert len(_A ) == 4
assert all_values["Key1"] == 3
assert all_values["Key2"] == 12
assert all_values["Key3"] == 41
assert all_values["Key4"] == -19
def lowercase ( ) ->List[str]:
"""simple docstring"""
__snake_case : Any = SkipList()
skip_list.insert('''Key1''' , 10 )
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''Key5''' , 7 )
skip_list.insert('''Key7''' , 10 )
skip_list.insert('''Key10''' , 5 )
skip_list.insert('''Key7''' , 7 )
skip_list.insert('''Key5''' , 5 )
skip_list.insert('''Key10''' , 10 )
__snake_case : Union[str, Any] = skip_list.head
__snake_case : Optional[Any] = {}
while node.level != 0:
__snake_case : List[Any] = node.forward[0]
__snake_case : Any = node.value
if len(_A ) != 4:
print()
assert len(_A ) == 4
assert all_values["Key1"] == 12
assert all_values["Key7"] == 7
assert all_values["Key5"] == 5
assert all_values["Key10"] == 10
def lowercase ( ) ->str:
"""simple docstring"""
__snake_case : List[Any] = SkipList()
assert skip_list.find('''Some key''' ) is None
def lowercase ( ) ->str:
"""simple docstring"""
__snake_case : Tuple = SkipList()
skip_list.insert('''Key2''' , 20 )
assert skip_list.find('''Key2''' ) == 20
skip_list.insert('''Some Key''' , 10 )
skip_list.insert('''Key2''' , 8 )
skip_list.insert('''V''' , 13 )
assert skip_list.find('''Y''' ) is None
assert skip_list.find('''Key2''' ) == 8
assert skip_list.find('''Some Key''' ) == 10
assert skip_list.find('''V''' ) == 13
def lowercase ( ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Optional[Any] = SkipList()
skip_list.delete('''Some key''' )
assert len(skip_list.head.forward ) == 0
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : Union[str, Any] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowercase ( ) ->Any:
"""simple docstring"""
__snake_case : List[str] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 14 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''V''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) == 14
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''X''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) == 12
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key1''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) == 15
skip_list.delete('''Key2''' )
assert skip_list.find('''V''' ) is None
assert skip_list.find('''X''' ) is None
assert skip_list.find('''Key1''' ) is None
assert skip_list.find('''Key2''' ) is None
def lowercase ( ) ->Any:
"""simple docstring"""
__snake_case : List[str] = SkipList()
skip_list.insert('''Key1''' , 12 )
skip_list.insert('''V''' , 13 )
skip_list.insert('''X''' , 142 )
skip_list.insert('''Key2''' , 15 )
skip_list.delete('''X''' )
def traverse_keys(_snake_case : Optional[Any] ):
yield node.key
for forward_node in node.forward:
yield from traverse_keys(_A )
assert len(set(traverse_keys(skip_list.head ) ) ) == 4
def lowercase ( ) ->List[str]:
"""simple docstring"""
def is_sorted(_snake_case : List[Any] ):
return all(next_item >= item for item, next_item in zip(_A , lst[1:] ) )
__snake_case : Optional[Any] = SkipList()
for i in range(10 ):
skip_list.insert(_A , _A )
assert is_sorted(list(_A ) )
skip_list.delete(5 )
skip_list.delete(8 )
skip_list.delete(2 )
assert is_sorted(list(_A ) )
skip_list.insert(-12 , -12 )
skip_list.insert(77 , 77 )
assert is_sorted(list(_A ) )
def lowercase ( ) ->List[Any]:
"""simple docstring"""
for _ in range(100 ):
# Repeat test 100 times due to the probabilistic nature of skip list
# random values == random bugs
test_insert()
test_insert_overrides_existing_value()
test_searching_empty_list_returns_none()
test_search()
test_deleting_item_from_empty_list_do_nothing()
test_deleted_items_are_not_founded_by_find_method()
test_delete_removes_only_given_key()
test_delete_doesnt_leave_dead_nodes()
test_iter_always_yields_sorted_values()
def lowercase ( ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Any = SkipList()
skip_list.insert(2 , '''2''' )
skip_list.insert(4 , '''4''' )
skip_list.insert(6 , '''4''' )
skip_list.insert(4 , '''5''' )
skip_list.insert(8 , '''4''' )
skip_list.insert(9 , '''4''' )
skip_list.delete(4 )
print(_A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
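# Minimal usage sketch (added; written against the un-obfuscated names that
# the test helpers above rely on):
# sl = SkipList()
# sl.insert(3, "three"); sl.insert(1, "one"); sl.insert(2, "two")
# assert list(sl) == [1, 2, 3]  # iteration yields keys in sorted order
# assert sl.find(2) == "two"
# sl.delete(2); assert sl.find(2) is None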
| 352
|
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
SCREAMING_SNAKE_CASE : int = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig ):
    '''simple docstring'''
    batch_size: int = 10000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder ):
    '''simple docstring'''
    BUILDER_CONFIG_CLASS = ParquetConfig
    def _info(self ):
        '''simple docstring'''
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators(self , dl_manager ):
        '''simple docstring'''
        if not self.config.data_files:
            raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files ):
                    with open(file , '''rb''' ) as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f ) )
                    break
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table(self , pa_table ):
        '''simple docstring'''
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.info.features.arrow_schema )
        return pa_table

    def _generate_tables(self , files ):
        '''simple docstring'''
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema ) != sorted(self.config.columns ):
                raise ValueError(
                    f"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                parquet_file = pq.ParquetFile(f )
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
                        pa_table = pa.Table.from_batches([record_batch] )
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"""{file_idx}_{batch_idx}""", self._cast_table(pa_table )
                except ValueError as e:
                    logger.error(f"""Failed to read file '{file}' with error {type(e )}: {e}""" )
                    raise
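# Hedged usage sketch (added): this builder is what backs parquet loading in
# the `datasets` library, e.g.
# from datasets import load_dataset
# ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})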
| 24
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A : Optional[Any] = {
'''configuration_conditional_detr''': [
'''CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''ConditionalDetrConfig''',
'''ConditionalDetrOnnxConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''feature_extraction_conditional_detr'''] = ['''ConditionalDetrFeatureExtractor''']
    _A['''image_processing_conditional_detr'''] = ['''ConditionalDetrImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _A['''modeling_conditional_detr'''] = [
'''CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConditionalDetrForObjectDetection''',
'''ConditionalDetrForSegmentation''',
'''ConditionalDetrModel''',
'''ConditionalDetrPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _A, module_spec=__spec__)
| 229
|
'''simple docstring'''
def UpperCamelCase_ ( a : int , b : int ) -> str:
    '''Return the bitwise AND of two non-negative integers as a binary string.'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""" )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1""" ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
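# Illustrative calls (added): the function above ANDs the binary
# representations character by character.
assert UpperCamelCase_(25, 32) == "0b000000"  # 011001 & 100000
assert UpperCamelCase_(37, 50) == "0b100000"  # 100101 & 110010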
| 229
| 1
|
"""simple docstring"""
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class __A (snake_case__):
'''simple docstring'''
def __init__( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=13 , UpperCAmelCase_ : str=7 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=False , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=99 , UpperCAmelCase_ : Tuple=32 , UpperCAmelCase_ : Optional[int]=5 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : Any="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Any=512 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Optional[Any]=4 , UpperCAmelCase_ : Any=1 , ) ->List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = num_choices
snake_case_ = scope
snake_case_ = q_groups
snake_case_ = k_groups
snake_case_ = v_groups
snake_case_ = post_attention_groups
snake_case_ = intermediate_groups
snake_case_ = output_groups
def lowerCAmelCase ( self : Any ) ->Dict:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_input_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase ( self : int ) ->int:
"""simple docstring"""
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict ) ->Union[str, Any]:
"""simple docstring"""
snake_case_ = SqueezeBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
snake_case_ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] ) ->Dict:
"""simple docstring"""
snake_case_ = SqueezeBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple ) ->List[str]:
"""simple docstring"""
snake_case_ = SqueezeBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int ) ->Optional[Any]:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = SqueezeBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = SqueezeBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] ) ->Dict:
"""simple docstring"""
snake_case_ = self.num_choices
snake_case_ = SqueezeBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
snake_case_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase ( self : Union[str, Any] ) ->int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class __A (snake_case__ , snake_case__ , unittest.TestCase):
'''simple docstring'''
__lowercase: Any = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
__lowercase: Optional[int] = (
{
"""feature-extraction""": SqueezeBertModel,
"""fill-mask""": SqueezeBertForMaskedLM,
"""question-answering""": SqueezeBertForQuestionAnswering,
"""text-classification""": SqueezeBertForSequenceClassification,
"""token-classification""": SqueezeBertForTokenClassification,
"""zero-shot""": SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowercase: Dict = False
__lowercase: str = True
__lowercase: List[str] = False
    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        # assumes SqueezeBertConfig is imported at the top of this file
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
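

# Minimal usage sketch (editor's addition; relies only on the classes defined
# above). Shows the depth_multiplier validation in action.
if __name__ == "__main__":
    config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
    print(config.depth_multiplier, config.image_size)  # 0.75 192
    try:
        MobileNetV1Config(depth_multiplier=0.0)
    except ValueError as err:
        print(err)  # depth_multiplier must be greater than zero.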
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''')
def UpperCamelCase__ ( self) -> str:
pass
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase , __UpperCamelCase :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :List[Any] = model_class(__lowercase)
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer))
__UpperCamelCase :Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowercase , tf.keras.layers.Layer))
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase , __UpperCamelCase :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase :Tuple = model_class(__lowercase)
__UpperCamelCase :int = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase :Optional[int] = [*signature.parameters.keys()]
__UpperCamelCase :Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowercase)
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase)
def UpperCamelCase__ ( self) -> List[Any]:
__UpperCamelCase :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase)
def UpperCamelCase__ ( self) -> Optional[Any]:
# make the mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Tuple = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :str = model_class(__lowercase)
__UpperCamelCase :Optional[int] = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Dict = model(__lowercase , noise=__lowercase)
__UpperCamelCase :int = copy.deepcopy(self._prepare_for_class(__lowercase , __lowercase))
__UpperCamelCase :Union[str, Any] = model(**__lowercase , noise=__lowercase)
__UpperCamelCase :Tuple = outputs_dict[0].numpy()
__UpperCamelCase :Union[str, Any] = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)) , 1E-6)
def UpperCamelCase__ ( self) -> Optional[int]:
# make the mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :str = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :int = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :str = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
def prepare_numpy_arrays(__lowercase):
__UpperCamelCase :Optional[int] = {}
for k, v in inputs_dict.items():
if tf.is_tensor(__lowercase):
__UpperCamelCase :Optional[Any] = v.numpy()
else:
__UpperCamelCase :Optional[int] = np.array(__lowercase)
return inputs_np_dict
for model_class in self.all_model_classes:
__UpperCamelCase :int = model_class(__lowercase)
__UpperCamelCase :Tuple = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Any = prepare_numpy_arrays(__lowercase)
__UpperCamelCase :Any = model(__lowercase , noise=__lowercase)
__UpperCamelCase :Tuple = model(**__lowercase , noise=__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
def UpperCamelCase__ ( self , __lowercase , __lowercase , __lowercase) -> List[Any]:
# make masks reproducible
np.random.seed(2)
__UpperCamelCase :Any = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
__UpperCamelCase :Optional[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__UpperCamelCase :Dict = tf.constant(__lowercase)
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
__UpperCamelCase :Any = tf_noise
super().check_pt_tf_models(__lowercase , __lowercase , __lowercase)
def UpperCamelCase__ ( self) -> Tuple:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Dict = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Optional[int] = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(__lowercase)
if module_member_name.endswith('''MainLayer''')
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''')] == model_class.__name__[: -len('''Model''')]
for module_member in (getattr(__lowercase , __lowercase),)
if isinstance(__lowercase , __lowercase)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(__lowercase , '''_keras_serializable''' , __lowercase)
}
__UpperCamelCase :Union[str, Any] = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :List[str] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
__UpperCamelCase :str = tf.convert_to_tensor(__lowercase)
inputs_dict.update({'''noise''': noise})
for main_layer_class in tf_main_layer_classes:
__UpperCamelCase :Optional[int] = main_layer_class(__lowercase)
__UpperCamelCase :Optional[Any] = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
__UpperCamelCase :Dict = tf.keras.Model(__lowercase , outputs=main_layer(__lowercase))
__UpperCamelCase :str = model(__lowercase)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase :str = os.path.join(__lowercase , '''keras_model.h5''')
model.save(__lowercase)
__UpperCamelCase :List[Any] = tf.keras.models.load_model(
__lowercase , custom_objects={main_layer_class.__name__: main_layer_class})
assert isinstance(__lowercase , tf.keras.Model)
__UpperCamelCase :Optional[Any] = model(__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
@slow
def UpperCamelCase__ ( self) -> Dict:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :Optional[Any] = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :Optional[int] = model_class(__lowercase)
__UpperCamelCase :Union[str, Any] = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Optional[int] = model(__lowercase , noise=__lowercase)
if model_class.__name__ == "TFViTMAEModel":
__UpperCamelCase :Any = outputs.last_hidden_state.numpy()
__UpperCamelCase :Optional[Any] = 0
else:
__UpperCamelCase :List[str] = outputs.logits.numpy()
__UpperCamelCase :Optional[int] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__lowercase , saved_model=__lowercase)
__UpperCamelCase :Optional[int] = model_class.from_pretrained(__lowercase)
__UpperCamelCase :List[str] = model(__lowercase , noise=__lowercase)
if model_class.__name__ == "TFViTMAEModel":
__UpperCamelCase :List[Any] = after_outputs['''last_hidden_state'''].numpy()
__UpperCamelCase :List[Any] = 0
else:
__UpperCamelCase :Any = after_outputs['''logits'''].numpy()
__UpperCamelCase :Tuple = 0
__UpperCamelCase :Any = np.amax(np.abs(out_a - out_a))
self.assertLessEqual(__lowercase , 1E-5)
def UpperCamelCase__ ( self) -> Union[str, Any]:
# make mask reproducible
np.random.seed(2)
__UpperCamelCase , __UpperCamelCase :Any = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase :str = int((config.image_size // config.patch_size) ** 2)
__UpperCamelCase :Optional[int] = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
for model_class in self.all_model_classes:
__UpperCamelCase :Tuple = model_class(__lowercase)
__UpperCamelCase :Any = self._prepare_for_class(__lowercase , __lowercase)
__UpperCamelCase :Tuple = model(__lowercase , noise=__lowercase)
__UpperCamelCase :List[Any] = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(__lowercase)
__UpperCamelCase :Optional[Any] = model_class.from_config(model.get_config())
# make sure it also accepts a normal config
__UpperCamelCase :Any = model_class.from_config(model.config)
__UpperCamelCase :List[Any] = new_model(__lowercase) # Build model
new_model.set_weights(model.get_weights())
__UpperCamelCase :str = new_model(__lowercase , noise=__lowercase)
self.assert_outputs_same(__lowercase , __lowercase)
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''')
def UpperCamelCase__ ( self) -> Dict:
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''')
def UpperCamelCase__ ( self) -> Any:
pass
@slow
def UpperCamelCase__ ( self) -> Any:
__UpperCamelCase :List[Any] = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''')
self.assertIsNotNone(__lowercase)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )
        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
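

# Worked check of what mask_ratio implies (editor's addition, a sketch using
# only the class above): with the defaults, the image splits into
# (224 // 16) ** 2 = 196 patches, and the MAE encoder sees
# ceil((1 - 0.75) * (196 + 1)) = 50 tokens (unmasked patches plus [CLS]).
if __name__ == "__main__":
    import math

    config = ViTMAEConfig()
    num_patches = (config.image_size // config.patch_size) ** 2
    visible_tokens = int(math.ceil((1 - config.mask_ratio) * (num_patches + 1)))
    print(num_patches, visible_tokens)  # 196 50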
"""
Project Euler Problem 6: https://projecteuler.net/problem=6

Find the difference between the sum of the squares of the first n natural
numbers and the square of their sum.
"""


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
if __name__ == "__main__":
print(f"""{solution() = }""")
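
    # Worked check (editor's addition, continuing the demo block): for n = 10
    # the sum of squares is 385 and the square of the sum is 55**2 = 3025,
    # so the difference is 2640.
    assert solution(10) == 2640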
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''):
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.Resampling.BILINEAR,
'''bilinear''': PIL.Image.Resampling.BILINEAR,
'''bicubic''': PIL.Image.Resampling.BICUBIC,
'''lanczos''': PIL.Image.Resampling.LANCZOS,
'''nearest''': PIL.Image.Resampling.NEAREST,
}
else:
    PIL_INTERPOLATION = {
'''linear''': PIL.Image.LINEAR,
'''bilinear''': PIL.Image.BILINEAR,
'''bicubic''': PIL.Image.BICUBIC,
'''lanczos''': PIL.Image.LANCZOS,
'''nearest''': PIL.Image.NEAREST,
}
def pt_to_pil(images):
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
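

# Usage sketch (editor's addition; assumes torch is installed alongside PIL).
# pt_to_pil expects NCHW tensors scaled to [-1, 1], as produced by diffusion
# pipelines, and returns a list of PIL images.
if __name__ == "__main__":
    import torch

    batch = torch.rand(2, 3, 8, 8) * 2 - 1  # two 3-channel 8x8 images in [-1, 1]
    pil_images = pt_to_pil(batch)
    print(len(pil_images), pil_images[0].size)  # 2 (8, 8)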
def is_automorphic_number(number: int) -> bool:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
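
    # Example scan (editor's addition): a number is automorphic when its square
    # ends in the number itself, e.g. 5**2 = 25 and 76**2 = 5776.
    print([n for n in range(1000) if is_automorphic_number(n)])
    # [0, 1, 5, 6, 25, 76, 376, 625]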
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)

        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
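

# Usage sketch (editor's addition). The checkpoint name and the input shapes
# below are assumptions for illustration only; they are not taken from this
# module, so treat this as a hedged example rather than a reference call.
if __name__ == "__main__":
    import numpy as np

    processor = TvltProcessor.from_pretrained("ZinengTang/tvlt-base")
    frames = [np.random.rand(8, 3, 224, 224)]  # assumed: one clip of 8 RGB frames
    audio = [np.random.rand(44100).astype(np.float32)]  # assumed: ~1 s mono audio
    inputs = processor(images=frames, audio=audio, sampling_rate=44100, return_tensors="pt")
    print(sorted(inputs.keys()))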
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
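
    # Visual check (editor's addition, continuing the demo block): overlay the
    # returned path on the grid, with "#" for obstacles and "*" for path cells.
    overlay = [["#" if cell == 1 else "." for cell in row] for row in grid]
    for px, py in path:
        overlay[px][py] = "*"
    print("\n".join(" ".join(row) for row in overlay))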
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """Build a state space tree and traverse it depth-first, printing each permutation."""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
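
# Cross-check (editor's addition): the state space tree visits each ordering
# exactly once, so a length-n sequence yields n! permutations; the standard
# library agrees for the four-element demo above.
from itertools import permutations

assert len(list(permutations([3, 1, 2, 4]))) == 24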
"""Close or mark as stale inactive issues on the huggingface/accelerate repository."""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"feature request",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='''closed''' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result


def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    # Prune branches that have overshot max_sum or can no longer reach it.
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums,
            max_sum,
            index + 1,
            [*path, nums[index]],
            result,
            remaining_nums_sum - nums[index],
        )


nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
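
# Expected output (editor's addition): the only subsets of [3, 34, 4, 12, 5, 2]
# summing to 9 are [3, 4, 2] and [4, 5]. A brute-force cross-check:
from itertools import combinations

brute_force = [list(c) for r in range(1, len(nums) + 1) for c in combinations(nums, r) if sum(c) == max_sum]
assert sorted(map(sorted, brute_force)) == sorted(map(sorted, result))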
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_35_15_63,
'''num_examples''': 1_00_00,
},
{
'''name''': '''validation''',
'''num_bytes''': 23_84_18,
'''num_examples''': 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes, expected[split].num_bytes)
        else:
            assert result == expected
"""simple docstring"""
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C


def electric_conductivity(
    conductivity: float,
    electron_conc: float,
    mobility: float,
) -> tuple[str, float]:
    if (conductivity, electron_conc, mobility).count(0) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
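
    # Worked example (editor's addition): sigma = q * n * mu, so with
    # electron_conc = 25 and mobility = 100 the missing value is
    # 100 * 25 * 1.6021e-19 = 4.00525e-16 S/m.
    print(electric_conductivity(conductivity=0, electron_conc=25, mobility=100))
    # ('conductivity', 4.00525e-16)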
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def UpperCamelCase ( __lowerCamelCase : Tuple ):
snake_case : List[str] = DPTConfig(embedding_type="hybrid" )
if "large" in checkpoint_url:
snake_case : List[str] = 1024
snake_case : Optional[int] = 4096
snake_case : int = 24
snake_case : Any = 16
snake_case : int = [5, 11, 17, 23]
snake_case : Tuple = [256, 512, 1024, 1024]
snake_case : int = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
snake_case : List[str] = 768
snake_case : Tuple = [1, 1, 1, 0.5]
snake_case : Any = [256, 512, 768, 768]
snake_case : Dict = 150
snake_case : Optional[int] = 16
snake_case : str = (1, 384, 384)
snake_case : List[Any] = False
snake_case : List[str] = "project"
if "ade" in checkpoint_url:
snake_case : Any = True
snake_case : Dict = 768
snake_case : Dict = [1, 1, 1, 0.5]
snake_case : int = 150
snake_case : Union[str, Any] = 16
snake_case : int = "huggingface/label-files"
snake_case : Optional[Any] = "ade20k-id2label.json"
snake_case : Optional[int] = json.load(open(cached_download(hf_hub_url(__lowerCamelCase , __lowerCamelCase , repo_type="dataset" ) ) , "r" ) )
snake_case : Optional[Any] = {int(__lowerCamelCase ): v for k, v in idalabel.items()}
snake_case : Tuple = idalabel
snake_case : Dict = {v: k for k, v in idalabel.items()}
snake_case : List[str] = [1, 150, 480, 480]
return config, expected_shape
def UpperCamelCase ( __lowerCamelCase : str ):
snake_case : Optional[Any] = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
for k in ignore_keys:
state_dict.pop(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase ( __lowerCamelCase : Optional[Any] ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
snake_case : List[Any] = name.replace("pretrained.model" , "dpt.encoder" )
if "pretrained.model" in name:
snake_case : Any = name.replace("pretrained.model" , "dpt.embeddings" )
if "patch_embed" in name:
snake_case : str = name.replace("patch_embed" , "" )
if "pos_embed" in name:
snake_case : int = name.replace("pos_embed" , "position_embeddings" )
if "attn.proj" in name:
snake_case : List[Any] = name.replace("attn.proj" , "attention.output.dense" )
if "proj" in name and "project" not in name:
snake_case : List[str] = name.replace("proj" , "projection" )
if "blocks" in name:
snake_case : Optional[Any] = name.replace("blocks" , "layer" )
if "mlp.fc1" in name:
snake_case : str = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case : List[Any] = name.replace("mlp.fc2" , "output.dense" )
if "norm1" in name and "backbone" not in name:
snake_case : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name and "backbone" not in name:
snake_case : Dict = name.replace("norm2" , "layernorm_after" )
if "scratch.output_conv" in name:
snake_case : Optional[int] = name.replace("scratch.output_conv" , "head" )
if "scratch" in name:
snake_case : str = name.replace("scratch" , "neck" )
if "layer1_rn" in name:
snake_case : Any = name.replace("layer1_rn" , "convs.0" )
if "layer2_rn" in name:
snake_case : Any = name.replace("layer2_rn" , "convs.1" )
if "layer3_rn" in name:
snake_case : Dict = name.replace("layer3_rn" , "convs.2" )
if "layer4_rn" in name:
snake_case : List[Any] = name.replace("layer4_rn" , "convs.3" )
if "refinenet" in name:
snake_case : str = int(name[len("neck.refinenet" ) : len("neck.refinenet" ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
snake_case : Optional[Any] = name.replace(f"""refinenet{layer_idx}""" , f"""fusion_stage.layers.{abs(layer_idx-4 )}""" )
if "out_conv" in name:
snake_case : Any = name.replace("out_conv" , "projection" )
if "resConfUnit1" in name:
snake_case : List[str] = name.replace("resConfUnit1" , "residual_layer1" )
if "resConfUnit2" in name:
snake_case : Union[str, Any] = name.replace("resConfUnit2" , "residual_layer2" )
if "conv1" in name:
snake_case : str = name.replace("conv1" , "convolution1" )
if "conv2" in name:
snake_case : Dict = name.replace("conv2" , "convolution2" )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
snake_case : Dict = name.replace("pretrained.act_postprocess1.0.project.0" , "neck.reassemble_stage.readout_projects.0.0" )
if "pretrained.act_postprocess2.0.project.0" in name:
snake_case : List[Any] = name.replace("pretrained.act_postprocess2.0.project.0" , "neck.reassemble_stage.readout_projects.1.0" )
if "pretrained.act_postprocess3.0.project.0" in name:
snake_case : Optional[Any] = name.replace("pretrained.act_postprocess3.0.project.0" , "neck.reassemble_stage.readout_projects.2.0" )
if "pretrained.act_postprocess4.0.project.0" in name:
snake_case : str = name.replace("pretrained.act_postprocess4.0.project.0" , "neck.reassemble_stage.readout_projects.3.0" )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
snake_case : List[Any] = name.replace("pretrained.act_postprocess1.3" , "neck.reassemble_stage.layers.0.projection" )
if "pretrained.act_postprocess1.4" in name:
snake_case : List[Any] = name.replace("pretrained.act_postprocess1.4" , "neck.reassemble_stage.layers.0.resize" )
if "pretrained.act_postprocess2.3" in name:
snake_case : List[str] = name.replace("pretrained.act_postprocess2.3" , "neck.reassemble_stage.layers.1.projection" )
if "pretrained.act_postprocess2.4" in name:
snake_case : Any = name.replace("pretrained.act_postprocess2.4" , "neck.reassemble_stage.layers.1.resize" )
if "pretrained.act_postprocess3.3" in name:
snake_case : Optional[int] = name.replace("pretrained.act_postprocess3.3" , "neck.reassemble_stage.layers.2.projection" )
if "pretrained.act_postprocess4.3" in name:
snake_case : Dict = name.replace("pretrained.act_postprocess4.3" , "neck.reassemble_stage.layers.3.projection" )
if "pretrained.act_postprocess4.4" in name:
snake_case : Optional[int] = name.replace("pretrained.act_postprocess4.4" , "neck.reassemble_stage.layers.3.resize" )
if "pretrained" in name:
snake_case : List[str] = name.replace("pretrained" , "dpt" )
if "bn" in name:
snake_case : Any = name.replace("bn" , "batch_norm" )
if "head" in name:
snake_case : str = name.replace("head" , "head.head" )
if "encoder.norm" in name:
snake_case : Dict = name.replace("encoder.norm" , "layernorm" )
if "auxlayer" in name:
snake_case : Dict = name.replace("auxlayer" , "auxiliary_head.head" )
if "backbone" in name:
snake_case : Dict = name.replace("backbone" , "backbone.bit.encoder" )
if ".." in name:
snake_case : int = name.replace(".." , "." )
if "stem.conv" in name:
snake_case : List[Any] = name.replace("stem.conv" , "bit.embedder.convolution" )
if "blocks" in name:
snake_case : Union[str, Any] = name.replace("blocks" , "layers" )
if "convolution" in name and "backbone" in name:
snake_case : List[str] = name.replace("convolution" , "conv" )
if "layer" in name and "backbone" in name:
snake_case : str = name.replace("layer" , "layers" )
if "backbone.bit.encoder.bit" in name:
snake_case : Any = name.replace("backbone.bit.encoder.bit" , "backbone.bit" )
if "embedder.conv" in name:
snake_case : int = name.replace("embedder.conv" , "embedder.convolution" )
if "backbone.bit.encoder.stem.norm" in name:
snake_case : str = name.replace("backbone.bit.encoder.stem.norm" , "backbone.bit.embedder.norm" )
return name
def UpperCamelCase ( __lowerCamelCase : int , __lowerCamelCase : Tuple ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case : List[str] = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.weight""" )
snake_case : Tuple = state_dict.pop(f"""dpt.encoder.layer.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case : List[str] = in_proj_weight[: config.hidden_size, :]
snake_case : Dict = in_proj_bias[: config.hidden_size]
snake_case : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case : int = in_proj_weight[
-config.hidden_size :, :
]
snake_case : Dict = in_proj_bias[-config.hidden_size :]
def UpperCamelCase ( ):
snake_case : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case : Union[str, Any] = Image.open(requests.get(__lowerCamelCase , stream=__lowerCamelCase ).raw )
return im
@torch.no_grad()
def UpperCamelCase ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : List[str] ):
snake_case , snake_case : Any = get_dpt_config(__lowerCamelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
snake_case : str = torch.load(__lowerCamelCase , map_location="cpu" )
# remove certain keys
remove_ignore_keys_(__lowerCamelCase )
# rename keys
for key in state_dict.copy().keys():
snake_case : Optional[Any] = state_dict.pop(__lowerCamelCase )
snake_case : Optional[int] = val
# read in qkv matrices
read_in_q_k_v(__lowerCamelCase , __lowerCamelCase )
# load HuggingFace model
snake_case : Union[str, Any] = DPTForSemanticSegmentation(__lowerCamelCase ) if "ade" in checkpoint_url else DPTForDepthEstimation(__lowerCamelCase )
model.load_state_dict(__lowerCamelCase )
model.eval()
# Check outputs on an image
snake_case : Tuple = 480 if "ade" in checkpoint_url else 384
snake_case : Tuple = DPTImageProcessor(size=__lowerCamelCase )
snake_case : int = prepare_img()
snake_case : Tuple = image_processor(__lowerCamelCase , return_tensors="pt" )
# forward pass
snake_case : Tuple = model(**__lowerCamelCase ).logits if "ade" in checkpoint_url else model(**__lowerCamelCase ).predicted_depth
if show_prediction:
snake_case : List[Any] = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode="bicubic" , align_corners=__lowerCamelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__lowerCamelCase )
if push_to_hub:
model.push_to_hub("ybelkada/dpt-hybrid-midas" )
image_processor.push_to_hub("ybelkada/dpt-hybrid-midas" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt""",
type=str,
help="""URL of the original DPT checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=False,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
parser.add_argument(
"""--model_name""",
default="""dpt-large""",
type=str,
help="""Name of the model, in case you're pushing to the hub.""",
)
parser.add_argument(
"""--show_prediction""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
from maths.prime_check import is_prime
def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
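
    # Example calls (editor's addition): 3 and 5 are twin primes, 4 is not
    # prime at all, and 5 pairs with 7.
    print(twin_prime(3), twin_prime(4), twin_prime(5))  # 5 -1 7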
'''simple docstring'''
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to its integer value."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    """Generate the minimal roman numeral form of an integer."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Total character savings from rewriting the file's numerals in minimal form."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
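
    # Worked round trip (editor's addition): "XXXXVIIII" parses to 49, whose
    # minimal form is "XLIX", saving 5 characters for that line of the file.
    original = "XXXXVIIII"
    value = parse_roman_numerals(original)  # 49
    minimal = generate_roman_numerals(value)  # "XLIX"
    print(value, minimal, len(original) - len(minimal))  # 49 XLIX 5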
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = "sgugger/tiny-distilbert-classification"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , only_pretrain_model=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Any ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , [config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def lowerCAmelCase ( self : int ):
'''simple docstring'''
_A = "patrickvonplaten/t5-tiny-random"
_A = AutoConfig.from_pretrained(__UpperCAmelCase )
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase , configs=[config] )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU" ) ) == 0 , "Cannot do xla on CPU." )
def lowerCAmelCase ( self : str ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=__UpperCAmelCase , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , use_xla=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , save_to_csv=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(__UpperCAmelCase , "inf_time.csv" ) , inference_memory_csv_file=os.path.join(__UpperCAmelCase , "inf_mem.csv" ) , env_info_csv_file=os.path.join(__UpperCAmelCase , "env.csv" ) , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "env.csv" ) ).exists() )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
_A = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(__UpperCAmelCase : Any ):
self.assertTrue(hasattr(__UpperCAmelCase , "sequential" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "cumulative" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "current" ) )
self.assertTrue(hasattr(__UpperCAmelCase , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_A = TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=__UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(__UpperCAmelCase , "log.txt" ) , log_print=__UpperCAmelCase , trace_memory_line_by_line=__UpperCAmelCase , eager_mode=__UpperCAmelCase , multi_process=__UpperCAmelCase , )
_A = TensorFlowBenchmark(__UpperCAmelCase )
_A = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
self.assertTrue(Path(os.path.join(__UpperCAmelCase , "log.txt" ) ).exists() )
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
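# Minimal sketch of the helper above (the model id is an assumed example):
#
#   config = get_image_processor_config("google/vit-base-patch16-224")
#   print(config.get("image_processor_type"))  # e.g. "ViTImageProcessor"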
class AutoImageProcessor:
    r"""
    Generic image processor class, instantiated via `AutoImageProcessor.from_pretrained(...)`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """Register a new image processor class for a given config class."""
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
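# Usage sketch (the checkpoint name is an illustrative assumption):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#
# Custom pairs can also be registered before loading; `MyConfig` and
# `MyImageProcessor` below are hypothetical classes:
#
#   AutoImageProcessor.register(MyConfig, MyImageProcessor)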
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
                yield i, self._cast_table(pa_table)
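# Usage sketch: this builder is what backs `load_dataset("pandas", ...)` for
# pickled DataFrames (the file path below is an assumed example):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("pandas", data_files="frames/train.pkl", split="train")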
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        # avoid divided by 0 during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None

        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )

        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None

        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid divided by 0 during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None

    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )

    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None

    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
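# A minimal sanity check of both implementations above; the list mirrors the
# demo collection used in the __main__ block below, and index 3 was verified
# by hand for item 45.
if __name__ == "__main__":
    _demo = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search(_demo, 45) == 3
    assert interpolation_search_by_recursion(_demo, 45, 0, len(_demo) - 1) == 3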
if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")

    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}


class PerceiverConfig(PretrainedConfig):
    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape


class PerceiverOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
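# Behavior in one line (assumed file lists): every torch `.bin` weight must
# have a `.safetensors` counterpart for the check to pass.
#
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])  # False
#   is_safetensors_compatible(["unet/diffusion_pytorch_model.bin",
#                              "unet/diffusion_pytorch_model.safetensors"])  # True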
"""simple docstring"""
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` holding the verification modes."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file"""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: int) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
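# Small self-check of `get_size_checksum_dict` above, using a throwaway temp
# file (the file and its contents are assumptions for the example).
if __name__ == "__main__":
    import tempfile

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"hello")
    print(get_size_checksum_dict(tmp.name))  # {'num_bytes': 5, 'checksum': '2cf24dba...'}
    os.remove(tmp.name)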
"""simple docstring"""
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)

    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
print(f"""{k}: {v['duration']}""")
from manim import *
class a__ ( _lowercase ):
"""simple docstring"""
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = Rectangle(height=0.5 , width=0.5 )
A__ = Rectangle(height=0.25 , width=0.25 )
A__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
A__ = [mem.copy() for i in range(6 )]
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
A__ = Text("CPU" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A_ )
A__ = [mem.copy() for i in range(4 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("GPU" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
gpu.move_to([-1, -1, 0] )
self.add(A_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("Model" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
model.move_to([3, -1.0, 0] )
self.add(A_ )
A__ = []
A__ = []
A__ = []
for i, rect in enumerate(A_ ):
rect.set_stroke(A_ )
A__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(A_ , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=A_ )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=A_ , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=A_ , buff=0.0 )
self.add(A_ )
model_cpu_arr.append(A_ )
self.add(*A_ , *A_ , *A_ )
A__ = [mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = Text("Loaded Checkpoint" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
checkpoint.move_to([3, 0.5, 0] )
self.add(A_ )
A__ = []
A__ = []
for i, rect in enumerate(A_ ):
A__ = fill.copy().set_fill(A_ , opacity=0.7 )
target.move_to(A_ )
ckpt_arr.append(A_ )
A__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(A_ )
self.add(*A_ , *A_ )
A__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
A__ = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A_ , A_ )
A__ = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(A_ , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A_ )
A__ = MarkupText(
F'Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
A__ = [meta_mem.copy() for i in range(6 )]
A__ = [meta_mem.copy() for i in range(6 )]
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(*A_ ).arrange(A_ , buff=0 )
A__ = VGroup(A_ , A_ ).arrange(A_ , buff=0 )
A__ = Text("Disk" , font_size=24 )
A__ = Group(A_ , A_ ).arrange(A_ , buff=0.5 , aligned_edge=A_ )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(A_ , run_time=3 ) , Write(A_ , run_time=1 ) , Create(A_ , run_time=1 ) )
A__ = []
for i, rect in enumerate(A_ ):
A__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(A_ , run_time=1.5 ) )
self.play(*A_ )
self.play(FadeOut(A_ ) )
A__ = MarkupText(F'Then, the checkpoint is removed from memory\nthrough garbage collection.' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A_ , run_time=3 ) )
self.play(
FadeOut(A_ , A_ , *A_ , *A_ ) , )
self.wait()
"""simple docstring"""
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    """Dataset version `MAJOR.MINOR.PATCH`."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the tuple (major, minor, patch) version extracted from the str."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Return the str version from the version tuple (major, minor, patch)."""
    return ".".join(str(v) for v in version_tuple)
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_tokenizer()
__A : Optional[Any] = self.get_feature_extractor()
__A : Union[str, Any] = self.get_decoder()
__A : List[str] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
processor.save_pretrained(self.tmpdirname)
__A : List[str] = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer , _UpperCAmelCase)
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor , _UpperCAmelCase)
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels)
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
processor.save_pretrained(self.tmpdirname)
# make sure that error is thrown when decoder alphabet doesn't match
__A : str = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3)
# decoder
self.assertEqual(processor.language_model.alpha , 5.0)
self.assertEqual(processor.language_model.beta , 3.0)
self.assertEqual(processor.language_model.score_boundary , -7.0)
self.assertEqual(processor.language_model.unk_score_offset , 3)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'])
with self.assertRaisesRegex(_UpperCAmelCase , 'include'):
WavaVecaProcessorWithLM(
tokenizer=_UpperCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder())
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_feature_extractor()
__A : Optional[int] = self.get_tokenizer()
__A : str = self.get_decoder()
__A : Dict = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : Tuple = floats_list((3, 1000))
__A : List[Any] = feature_extractor(_UpperCAmelCase , return_tensors='np')
__A : str = processor(_UpperCAmelCase , return_tensors='np')
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_feature_extractor()
__A : Tuple = self.get_tokenizer()
__A : str = self.get_decoder()
__A : str = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : Optional[Any] = 'This is a test string'
__A : Optional[Any] = processor(text=_UpperCAmelCase)
__A : Dict = tokenizer(_UpperCAmelCase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase=(2, 10, 16) , _UpperCAmelCase=77):
'''simple docstring'''
np.random.seed(_UpperCAmelCase)
return np.random.rand(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_feature_extractor()
__A : List[str] = self.get_tokenizer()
__A : Union[str, Any] = self.get_decoder()
__A : str = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : Union[str, Any] = self._get_dummy_logits(shape=(10, 16) , seed=13)
__A : Optional[int] = processor.decode(_UpperCAmelCase)
__A : str = decoder.decode_beams(_UpperCAmelCase)[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text)
self.assertEqual('</s> <s> </s>' , decoded_processor.text)
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score)
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']])
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : Optional[Any] = self.get_feature_extractor()
__A : Tuple = self.get_tokenizer()
__A : int = self.get_decoder()
__A : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : List[Any] = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__A : List[str] = processor.batch_decode(_UpperCAmelCase)
else:
with get_context(_UpperCAmelCase).Pool() as pool:
__A : Optional[int] = processor.batch_decode(_UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = list(_UpperCAmelCase)
with get_context('fork').Pool() as p:
__A : Optional[int] = decoder.decode_beams_batch(_UpperCAmelCase , _UpperCAmelCase)
__A ,__A ,__A : List[Any] = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0])
logit_scores_decoder.append(beams[0][-2])
lm_scores_decoder.append(beams[0][-1])
self.assertListEqual(_UpperCAmelCase , decoded_processor.text)
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text)
self.assertListEqual(_UpperCAmelCase , decoded_processor.logit_score)
self.assertListEqual(_UpperCAmelCase , decoded_processor.lm_score)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = self.get_feature_extractor()
__A : List[str] = self.get_tokenizer()
__A : List[Any] = self.get_decoder()
__A : Tuple = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : List[str] = self._get_dummy_logits()
__A : Tuple = 15
__A : Any = -20.0
__A : Dict = -4.0
__A : Dict = processor.batch_decode(
_UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__A : Tuple = decoded_processor_out.text
__A : Optional[Any] = list(_UpperCAmelCase)
with get_context('fork').Pool() as pool:
__A : int = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , beam_width=_UpperCAmelCase , beam_prune_logp=_UpperCAmelCase , token_min_logp=_UpperCAmelCase , )
__A : Tuple = [d[0][0] for d in decoded_decoder_out]
__A : Tuple = [d[0][2] for d in decoded_decoder_out]
__A : Dict = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , _UpperCAmelCase)
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.logit_score))
self.assertTrue(np.allclose([-20.054, -18.447] , _UpperCAmelCase , atol=1e-3))
self.assertTrue(np.array_equal(_UpperCAmelCase , decoded_processor_out.lm_score))
self.assertTrue(np.allclose([-15.554, -13.9474] , _UpperCAmelCase , atol=1e-3))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[int] = self.get_feature_extractor()
__A : List[str] = self.get_tokenizer()
__A : Union[str, Any] = self.get_decoder()
__A : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
__A : Any = self._get_dummy_logits()
__A : str = 2.0
__A : str = 5.0
__A : Optional[int] = -20.0
__A : Optional[int] = True
__A : Dict = processor.batch_decode(
_UpperCAmelCase , alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
__A : Optional[int] = decoded_processor_out.text
__A : Optional[Any] = list(_UpperCAmelCase)
decoder.reset_params(
alpha=_UpperCAmelCase , beta=_UpperCAmelCase , unk_score_offset=_UpperCAmelCase , lm_score_boundary=_UpperCAmelCase , )
with get_context('fork').Pool() as pool:
__A : Tuple = decoder.decode_beams_batch(
_UpperCAmelCase , _UpperCAmelCase , )
__A : Tuple = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , _UpperCAmelCase)
__A : Dict = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0)
self.assertEqual(lm_model.beta , 5.0)
self.assertEqual(lm_model.unk_score_offset , -20.0)
self.assertEqual(lm_model.score_boundary , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__A : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key]
__A : int = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
__A : str = os.listdir(_UpperCAmelCase)
__A : Dict = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = snapshot_download('hf-internal-testing/processor_with_lm')
__A : Tuple = WavaVecaProcessorWithLM.from_pretrained(_UpperCAmelCase)
__A : str = processor.decoder.model_container[processor.decoder._model_key]
__A : Tuple = Path(language_model._kenlm_model.path.decode('utf-8')).parent.parent.absolute()
__A : str = os.listdir(_UpperCAmelCase)
__A : Optional[int] = os.listdir(_UpperCAmelCase)
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : str = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__A : str = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm')
__A : List[Any] = floats_list((3, 1000))
__A : Optional[Any] = processor_wavaveca(_UpperCAmelCase , return_tensors='np')
__A : Any = processor_auto(_UpperCAmelCase , return_tensors='np')
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2)
__A : Dict = self._get_dummy_logits()
__A : Dict = processor_wavaveca.batch_decode(_UpperCAmelCase)
__A : List[Any] = processor_auto.batch_decode(_UpperCAmelCase)
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[str] = self.get_feature_extractor()
__A : Optional[int] = self.get_tokenizer()
__A : Dict = self.get_decoder()
__A : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , decoder=_UpperCAmelCase)
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = [d[key] for d in offsets]
return retrieved_list
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Tuple = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__A : Dict = self._get_dummy_logits()[0]
__A : Union[str, Any] = processor.decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase))
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word')) , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset') , [1, 3, 5])
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm')
__A : Optional[int] = self._get_dummy_logits()
__A : Optional[Any] = processor.batch_decode(_UpperCAmelCase , output_word_offsets=_UpperCAmelCase)
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys()) , 4)
self.assertTrue('text' in outputs)
self.assertTrue('word_offsets' in outputs)
self.assertTrue(isinstance(_UpperCAmelCase , _UpperCAmelCase))
self.assertListEqual(
[' '.join(self.get_from_offsets(_UpperCAmelCase , 'word')) for o in outputs['word_offsets']] , outputs.text)
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word') , ['<s>', '<s>', '</s>'])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset') , [0, 2, 4])
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset') , [1, 3, 5])
@slow
@require_torch
@require_torchaudio
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
import torch
__A : Union[str, Any] = load_dataset('common_voice' , 'en' , split='train' , streaming=_UpperCAmelCase)
__A : int = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6000))
__A : List[Any] = iter(_UpperCAmelCase)
__A : int = next(_UpperCAmelCase)
__A : List[str] = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
__A : int = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm')
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__A : Dict = processor(sample['audio']['array'] , return_tensors='pt').input_values
with torch.no_grad():
__A : str = model(_UpperCAmelCase).logits.cpu().numpy()
__A : Optional[int] = processor.decode(logits[0] , output_word_offsets=_UpperCAmelCase)
__A : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__A : Optional[int] = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__A : List[Any] = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word')) , _UpperCAmelCase)
self.assertEqual(' '.join(self.get_from_offsets(_UpperCAmelCase , 'word')) , output.text)
# output times
__A : int = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'start_time'))
__A : Dict = torch.tensor(self.get_from_offsets(_UpperCAmelCase , 'end_time'))
# fmt: off
__A : List[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
__A : List[str] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
# fmt: on
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01))
self.assertTrue(torch.allclose(_UpperCAmelCase , _UpperCAmelCase , atol=0.01))
'''simple docstring'''
import math
def proth(number: int) -> int:
    """Return the number-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers come in doubling "blocks"; compute how many blocks are needed.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue
        print(f"The {number}th Proth number: {value}")
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
UpperCamelCase__: Tuple = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
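# Behavior sketch (an assumption, not part of the module): the shim still
# constructs a working image processor but emits a FutureWarning.
#
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter("always")
#       _ = YolosFeatureExtractor()
#   assert any("deprecated" in str(w.message) for w in caught)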
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Union[str, Any] = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Tuple = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
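
# Usage note (a minimal sketch, not part of the original module): with the
# _LazyModule pattern above, a submodule is only imported when one of its
# attributes is first accessed, so importing the package itself stays cheap.
# The names below assume a standard transformers installation.
#
#   from transformers.models.distilbert import DistilBertConfig  # triggers the lazy import
#   config = DistilBertConfig()
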
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]

if TYPE_CHECKING:
    from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
    from .processing_blip import BlipProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_blip import BlipImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip import (
            BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            BlipForConditionalGeneration,
            BlipForImageTextRetrieval,
            BlipForQuestionAnswering,
            BlipModel,
            BlipPreTrainedModel,
            BlipTextModel,
            BlipVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_blip import (
            TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFBlipForConditionalGeneration,
            TFBlipForImageTextRetrieval,
            TFBlipForQuestionAnswering,
            TFBlipModel,
            TFBlipPreTrainedModel,
            TFBlipTextModel,
            TFBlipVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

from typing import List, Optional, Tuple, Union

import PIL
import torch
from torchvision import transforms

from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor


trans = transforms.Compose(
    [
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ]
)


def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image


class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        init_latents = image.to(device=device, dtype=dtype)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs. Raise error if strength is not in [0, 1]
        self.check_inputs(strength)

        # 2. Preprocess image
        image = preprocess(image)

        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)

        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents

        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output,
                t,
                image,
                eta=eta,
                use_clipped_model_output=use_clipped_model_output,
                generator=generator,
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, latent_timestep.item())

        return ImagePipelineOutput(images=image)

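
# Minimal usage sketch (the checkpoint name "google/ddpm-ema-celebahq-256" and the
# variable `input_image` are assumptions for illustration; neither appears in the
# original file). The pipeline partially noises the input and denoises it back.
#
#   from diffusers import DDIMScheduler, UNet2DModel
#
#   model_id = "google/ddpm-ema-celebahq-256"  # assumed 256x256 checkpoint
#   unet = UNet2DModel.from_pretrained(model_id, subfolder="unet")
#   scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
#   pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
#   images, noised_at = pipe(image=input_image, strength=0.5, return_dict=False)
#   images[0].save("denoised.png")
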
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def _lowercase ( self : str ):
__lowercase = tempfile.mkdtemp()
__lowercase = 8
# DPR tok
__lowercase = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__lowercase = os.path.join(self.tmpdirname, "dpr_tokenizer" )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
__lowercase = os.path.join(UpperCAmelCase__, DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
__lowercase = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__lowercase = dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__ ) ) ) )
__lowercase = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__lowercase = {"unk_token": "<unk>"}
__lowercase = os.path.join(self.tmpdirname, "bart_tokenizer" )
os.makedirs(UpperCAmelCase__, exist_ok=UpperCAmelCase__ )
__lowercase = os.path.join(UpperCAmelCase__, BART_VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(UpperCAmelCase__, BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase__ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase__ ) )
def _lowercase ( self : List[str] ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer" ) )
def _lowercase ( self : Tuple ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer" ) )
def _lowercase ( self : Union[str, Any] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer" ) )
def _lowercase ( self : Any ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Union[str, Any] ):
__lowercase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def _lowercase ( self : Dict ):
__lowercase = self.get_dummy_dataset()
__lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
__lowercase = dataset
__lowercase = RagRetriever(
UpperCAmelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
return retriever
def _lowercase ( self : List[str], UpperCAmelCase__ : bool ):
__lowercase = self.get_dummy_dataset()
__lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", )
if from_disk:
__lowercase = os.path.join(self.tmpdirname, "dataset" )
__lowercase = os.path.join(self.tmpdirname, "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname, "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset" ) )
del dataset
__lowercase = RagRetriever(
UpperCAmelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), )
else:
__lowercase = RagRetriever(
UpperCAmelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, UpperCAmelCase__ ), )
return retriever
def _lowercase ( self : List[Any] ):
__lowercase = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT )
__lowercase = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr" )
pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb" ) )
__lowercase = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl" )
__lowercase = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
pickle.dump(UpperCAmelCase__, open(UpperCAmelCase__, "wb" ) )
__lowercase = RagConfig(
retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="legacy", index_path=self.tmpdirname, )
__lowercase = RagRetriever(
UpperCAmelCase__, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def _lowercase ( self : List[str] ):
__lowercase = 1
__lowercase = self.get_dummy_canonical_hf_index_retriever()
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase ,__lowercase ,__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=UpperCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ), UpperCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0], "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def _lowercase ( self : Union[str, Any] ):
__lowercase = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
__lowercase = self.get_dummy_dataset()
retriever.save_pretrained(UpperCAmelCase__ )
__lowercase = RagRetriever.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=1 )
self.assertTrue(out is not None )
def _lowercase ( self : List[Any] ):
__lowercase = 1
__lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase ,__lowercase ,__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=UpperCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ), UpperCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0], "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def _lowercase ( self : str ):
__lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase__ )
__lowercase = RagRetriever.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=1 )
self.assertTrue(out is not None )
def _lowercase ( self : List[Any] ):
__lowercase = 1
__lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase ,__lowercase ,__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=UpperCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ["embeddings", "id", "text", "title"] )
self.assertEqual(len(doc_dicts[0]["id"] ), UpperCAmelCase__ )
self.assertEqual(doc_dicts[0]["id"][0], "1" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["id"][0], "0" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def _lowercase ( self : Any ):
__lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase__ )
__lowercase = RagRetriever.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=1 )
self.assertTrue(out is not None )
def _lowercase ( self : List[Any] ):
__lowercase = 1
__lowercase = self.get_dummy_legacy_index_retriever()
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase ,__lowercase ,__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=UpperCAmelCase__ )
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(UpperCAmelCase__ ), 2 )
self.assertEqual(sorted(doc_dicts[0] ), ["text", "title"] )
self.assertEqual(len(doc_dicts[0]["text"] ), UpperCAmelCase__ )
self.assertEqual(doc_dicts[0]["text"][0], "bar" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["text"][0], "foo" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist(), [[1], [0]] )
def _lowercase ( self : Optional[Any] ):
__lowercase = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(UpperCAmelCase__ )
__lowercase = RagRetriever.from_pretrained(UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever.retrieve(UpperCAmelCase__, n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase ( self : Tuple ):
import torch
__lowercase = 1
__lowercase = self.get_dummy_canonical_hf_index_retriever()
__lowercase = [[5, 7], [1_0, 1_1]]
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever(UpperCAmelCase__, UpperCAmelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCAmelCase__ )
__lowercase ,__lowercase ,__lowercase = (
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, UpperCAmelCase__ )
self.assertIsInstance(UpperCAmelCase__, np.ndarray )
__lowercase = retriever(
UpperCAmelCase__, UpperCAmelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCAmelCase__, return_tensors="pt", )
__lowercase ,__lowercase ,__lowercase ,__lowercase = ( # noqa: F841
out["context_input_ids"],
out["context_attention_mask"],
out["retrieved_doc_embeds"],
out["doc_ids"],
)
self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
self.assertIsInstance(UpperCAmelCase__, torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def _lowercase ( self : Any ):
__lowercase = self.get_dpr_ctx_encoder_tokenizer()
__lowercase = 1
__lowercase = self.get_dummy_custom_hf_index_retriever(from_disk=UpperCAmelCase__ )
retriever.set_ctx_encoder_tokenizer(UpperCAmelCase__ )
__lowercase = [[5, 7], [1_0, 1_1]]
__lowercase = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )], dtype=np.floataa )
__lowercase = retriever(UpperCAmelCase__, UpperCAmelCase__, prefix=retriever.config.generator.prefix, n_docs=UpperCAmelCase__ )
self.assertEqual(
len(UpperCAmelCase__ ), 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ), UpperCAmelCase__ ) # check for doc token related keys in dictionary.
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : Any=3_2 , SCREAMING_SNAKE_CASE_ : int=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_0 , SCREAMING_SNAKE_CASE_ : Dict=[1_0, 2_0, 3_0, 4_0] , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2, 1] , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : int="relu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : List[str]=None , ):
lowerCAmelCase_ : Optional[int] = parent
lowerCAmelCase_ : Union[str, Any] = batch_size
lowerCAmelCase_ : Optional[int] = image_size
lowerCAmelCase_ : Optional[int] = num_channels
lowerCAmelCase_ : Any = embeddings_size
lowerCAmelCase_ : Dict = hidden_sizes
lowerCAmelCase_ : Any = depths
lowerCAmelCase_ : Optional[int] = is_training
lowerCAmelCase_ : int = use_labels
lowerCAmelCase_ : List[Any] = hidden_act
lowerCAmelCase_ : Dict = num_labels
lowerCAmelCase_ : Optional[int] = scope
lowerCAmelCase_ : Tuple = len(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.num_labels )
lowerCAmelCase_ : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : List[Any] = TFRegNetModel(config=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : int = TFRegNetForImageClassification(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : str = self.prepare_config_and_inputs()
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = config_and_inputs
lowerCAmelCase_ : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": TFRegNetModel, """image-classification""": TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = TFRegNetModelTester(self )
lowerCAmelCase_ : Tuple = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
return
@unittest.skip(reason='RegNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
super().test_keras_fit()
@unittest.skip(reason='RegNet does not support input and output embeddings' )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ ,lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : int = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Dict = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : List[Any] = [*signature.parameters.keys()]
lowerCAmelCase_ : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : List[Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , training=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowerCAmelCase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
lowerCAmelCase_ ,lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[int] = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowerCAmelCase_ : str = layer_type
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ : List[str] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ ,lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str={} ):
lowerCAmelCase_ : Optional[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) , msg=(
'Tuple and dict output are not equal. Difference:'
F" {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}"
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Union[str, Any] = model_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Tuple = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
lowerCAmelCase_ : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : int = TFRegNetModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def UpperCamelCase_ ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : int = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
lowerCAmelCase_ : Tuple = self.default_image_processor
lowerCAmelCase_ : Dict = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='tf' )
# forward pass
lowerCAmelCase_ : List[str] = model(**SCREAMING_SNAKE_CASE_ , training=SCREAMING_SNAKE_CASE_ )
# verify the logits
lowerCAmelCase_ : Optional[Any] = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = tf.constant([-0.41_80, -1.50_51, -3.48_36] )
tf.debugging.assert_near(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1E-4 )
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort a list of non-negative integers with least-significant-digit radix sort.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()

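
# Quick demonstration (illustrative values, not from the original file): LSD radix
# sort runs in O(d * (n + RADIX)) for n keys with d decimal digits, and each bucket
# pass is stable, which is what makes sorting digit-by-digit correct.
#
#   import random
#   data = [random.randrange(10_000) for _ in range(1_000)]
#   assert radix_sort(list(data)) == sorted(data)
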
from typing import List, Optional, Tuple, Union

import torch

from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

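
# Minimal usage sketch (the checkpoint name "google/ddpm-ema-church-256" is an
# assumption for illustration; any DDPM/DDIM-compatible UNet checkpoint works).
# Deterministic DDIM sampling corresponds to eta=0.0.
#
#   from diffusers import DDIMPipeline
#
#   pipe = DDIMPipeline.from_pretrained("google/ddpm-ema-church-256")
#   image = pipe(num_inference_steps=50, eta=0.0).images[0]
#   image.save("sample.png")
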
'''simple docstring'''
from collections import defaultdict
from pathlib import Path

import pandas as pd
from rouge_cli import calculate_rouge_path

from utils import calculate_rouge


PRED = [
    '''Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'''
    ''' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'''
    ''' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.''',
    '''The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'''
    ''' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'''
    ''' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'''
    ''' body.''',
    '''Amnesty International releases its annual report on the death penalty. The report catalogs the use of'''
    ''' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'''
    ''' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'''
    ''' punishment.''',
]

TGT = [
    '''Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'''
    ''' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'''
    ''' had informed his Lufthansa training school of an episode of severe depression, airline says .''',
    '''Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'''
    ''' Israel and the United States opposed the move, which could open the door to war crimes investigations against'''
    ''' Israelis .''',
    '''Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'''
    ''' death . Organization claims that governments around the world are using the threat of terrorism to advance'''
    ''' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'''
    ''' sentences up by 28% .''',
]


def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )


def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep


def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep


def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        'Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.',
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        'Margot Frank, died in 1945, a month earlier than previously thought.',
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        ' the final seconds on board Flight 9525.',
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)


def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score


def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)

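
# Quick sanity check (a sketch; `calculate_rouge` comes from the local seq2seq
# `utils` module alongside this test, so run it from that example directory):
#
#   scores = calculate_rouge(PRED, TGT, rouge_keys=["rouge1", "rougeLsum"])
#   print(scores)  # e.g. {"rouge1": ..., "rougeLsum": ...} aggregated scores
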
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)

    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)

    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths

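
# Shape-level smoke test (a sketch with made-up small dimensions, not from the
# original file): project a prefix embedding into the GPT-2 embedding space and
# run a forward pass through the decoder defined above.
#
#   decoder = UniDiffuserTextDecoder(
#       prefix_length=4, prefix_inner_dim=64, prefix_hidden_dim=32, n_embd=768
#   )
#   input_ids = torch.randint(0, 50257, (2, 10))
#   prefix = torch.randn(2, 4, 64)
#   out, hidden = decoder(input_ids, prefix)
#   print(out.logits.shape)  # torch.Size([2, 14, 50257]) -> prefix_length + seq_len
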
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
A = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
def __init__( self , *_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ) -> Dict:
'''simple docstring'''
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = eval_examples
UpperCAmelCase : Optional[Any] = post_process_function
UpperCAmelCase : Dict = quant_trainer_args
UpperCAmelCase : List[Any] = 128 # default number of calibration samples
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> Dict:
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
raise ValueError("""Trainer: calibration requires an calib_dataset.""" )
UpperCAmelCase : Union[str, Any] = calib_dataset if calib_dataset is not None else self.calib_dataset
UpperCAmelCase : Optional[int] = self._remove_unused_columns(_SCREAMING_SNAKE_CASE , description="""Calibration""" )
return DataLoader(
_SCREAMING_SNAKE_CASE , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=_SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Tuple = self.train_dataset if calib_dataset is None else calib_dataset
UpperCAmelCase : Optional[Any] = self.get_calib_dataloader(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Any = self.model
quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args , calib=_SCREAMING_SNAKE_CASE )
model.eval()
quant_trainer.enable_calibration(_SCREAMING_SNAKE_CASE )
logger.info("""***** Running calibration *****""" )
logger.info(F" Num examples = {self.calib_num}" )
logger.info(F" Batch size = {calib_dataloader.batch_size}" )
for step, inputs in enumerate(_SCREAMING_SNAKE_CASE ):
# Prediction step
UpperCAmelCase : Union[str, Any] = self.prediction_step(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prediction_loss_only=_SCREAMING_SNAKE_CASE )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(_SCREAMING_SNAKE_CASE , self.quant_trainer_args )
UpperCAmelCase : Tuple = model
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "eval" ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Optional[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
UpperCAmelCase : List[str] = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase : List[Any] = self.compute_metrics
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase : Any = eval_loop(
_SCREAMING_SNAKE_CASE , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , )
finally:
UpperCAmelCase : str = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
UpperCAmelCase : Tuple = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions )
UpperCAmelCase : List[str] = self.compute_metrics(_SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase : str = metrics.pop(_SCREAMING_SNAKE_CASE )
self.log(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase : List[str] = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
UpperCAmelCase : str = self.callback_handler.on_evaluate(self.args , self.state , self.control , _SCREAMING_SNAKE_CASE )
return metrics
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = "test" ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = self.get_test_dataloader(_SCREAMING_SNAKE_CASE )
# Temporarily disable metric computation, we will do it in the loop here.
UpperCAmelCase : Any = self.compute_metrics
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Optional[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
UpperCAmelCase : Optional[int] = eval_loop(
_SCREAMING_SNAKE_CASE , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=_SCREAMING_SNAKE_CASE , )
finally:
UpperCAmelCase : Union[str, Any] = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
UpperCAmelCase : Tuple = self.post_process_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , output.predictions , """predict""" )
UpperCAmelCase : str = self.compute_metrics(_SCREAMING_SNAKE_CASE )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F"{metric_key_prefix}_" ):
UpperCAmelCase : Tuple = metrics.pop(_SCREAMING_SNAKE_CASE )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE="./" ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = self.eval_dataset
UpperCAmelCase : Any = self.get_eval_dataloader(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = next(iter(_SCREAMING_SNAKE_CASE ) )
# saving device - to make it consistent
UpperCAmelCase : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
UpperCAmelCase : Tuple = tuple(v.to(_SCREAMING_SNAKE_CASE ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : Tuple = self.model.to(_SCREAMING_SNAKE_CASE )
model.eval()
model.float()
UpperCAmelCase : Optional[Any] = model.module if hasattr(_SCREAMING_SNAKE_CASE , """module""" ) else model
quant_trainer.configure_model(_SCREAMING_SNAKE_CASE , self.quant_trainer_args )
UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , """model.onnx""" )
logger.info(F"exporting model to {output_model_file}" )
UpperCAmelCase : int = {0: '''batch_size''', 1: '''seq_len'''}
torch.onnx.export(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , export_params=_SCREAMING_SNAKE_CASE , opset_version=13 , do_constant_folding=_SCREAMING_SNAKE_CASE , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=_SCREAMING_SNAKE_CASE , )
logger.info("""onnx export finished""" )
| 371
|
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self , _SCREAMING_SNAKE_CASE = 6 ) -> None:
'''simple docstring'''
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
self.create_linked_list(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = Node()
UpperCAmelCase : Dict = current_node
UpperCAmelCase : Any = current_node
UpperCAmelCase : Optional[int] = current_node
for _ in range(1 , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Optional[Any] = Node()
UpperCAmelCase : Tuple = current_node
UpperCAmelCase : Any = previous_node
UpperCAmelCase : List[Any] = current_node
UpperCAmelCase : List[str] = self.front
UpperCAmelCase : Tuple = previous_node
def SCREAMING_SNAKE_CASE ( self ) -> bool:
'''simple docstring'''
return (
self.front == self.rear
and self.front is not None
and self.front.data is None
)
def SCREAMING_SNAKE_CASE ( self ) -> Any | None:
'''simple docstring'''
self.check_can_perform_operation()
return self.front.data if self.front else None
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> None:
'''simple docstring'''
if self.rear is None:
return
self.check_is_full()
if not self.is_empty():
UpperCAmelCase : Optional[Any] = self.rear.next
if self.rear:
UpperCAmelCase : Optional[int] = data
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
self.check_can_perform_operation()
if self.rear is None or self.front is None:
return None
if self.front == self.rear:
UpperCAmelCase : Tuple = self.front.data
UpperCAmelCase : int = None
return data
UpperCAmelCase : Dict = self.front
UpperCAmelCase : Tuple = old_front.next
UpperCAmelCase : str = old_front.data
UpperCAmelCase : int = None
return data
def SCREAMING_SNAKE_CASE ( self ) -> None:
'''simple docstring'''
if self.is_empty():
raise Exception("""Empty Queue""" )
def SCREAMING_SNAKE_CASE ( self ) -> None:
'''simple docstring'''
if self.rear and self.rear.next == self.front:
raise Exception("""Full Queue""" )
class SCREAMING_SNAKE_CASE__ :
def __init__( self ) -> None:
'''simple docstring'''
UpperCAmelCase : Any | None = None
UpperCAmelCase : Node | None = None
UpperCAmelCase : Node | None = None
if __name__ == "__main__":
import doctest
doctest.testmod()
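Since the identifiers above are obfuscated, a readable sketch of the same structure may help: a fixed-capacity FIFO queue over a pre-allocated ring of linked nodes. The names and the exact enqueue/dequeue bookkeeping below are my reconstruction, not the original code:

class Node:
    def __init__(self) -> None:
        self.data = None
        self.next_node = None


class CircularQueueLinkedList:
    """Fixed-capacity FIFO queue over a ring of pre-allocated nodes."""

    def __init__(self, capacity: int = 6) -> None:
        self.front = self.rear = Node()
        node = self.front
        for _ in range(capacity - 1):
            node.next_node = Node()
            node = node.next_node
        node.next_node = self.front  # close the ring

    def is_empty(self) -> bool:
        return self.front == self.rear and self.front.data is None

    def enqueue(self, data) -> None:
        nxt = self.rear.next_node
        if nxt == self.front and nxt.data is not None:
            raise Exception("Full Queue")
        if not self.is_empty():
            self.rear = nxt  # advance rear around the ring
        self.rear.data = data

    def dequeue(self):
        if self.is_empty():
            raise Exception("Empty Queue")
        data, self.front.data = self.front.data, None
        if self.front != self.rear:
            self.front = self.front.next_node
        return data


queue = CircularQueueLinkedList(capacity=3)
queue.enqueue("a")
queue.enqueue("b")
assert queue.dequeue() == "a"  # FIFO order is preserved around the ring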
| 76
| 0
|
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
lowercase__ : List[str] = '''src/diffusers'''
lowercase__ : Union[str, Any] = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
lowercase__ : List[Any] = importlib.util.spec_from_file_location(
'''diffusers''',
os.path.join(DIFFUSERS_PATH, '''__init__.py'''),
submodule_search_locations=[DIFFUSERS_PATH],
)
lowercase__ : Optional[int] = spec.loader.load_module()
def _lowerCAmelCase ( __snake_case : Optional[Any] , __snake_case : List[str] ) -> Optional[int]:
return line.startswith(__snake_case ) or len(__snake_case ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , __snake_case ) is not None
def _lowerCAmelCase ( __snake_case : Optional[int] ) -> Tuple:
__A : Dict = object_name.split('.' )
__A : str = 0
# First let's find the module where our object lives.
__A : int = parts[i]
while i < len(__snake_case ) and not os.path.isfile(os.path.join(__snake_case , f'{module}.py' ) ):
i += 1
if i < len(__snake_case ):
__A : str = os.path.join(__snake_case , parts[i] )
if i >= len(__snake_case ):
raise ValueError(f'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(__snake_case , f'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__A : Dict = f.readlines()
# Now let's find the class / func in the code!
__A : Tuple = ''
__A : int = 0
for name in parts[i + 1 :]:
while (
line_index < len(__snake_case ) and re.search(rf'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(__snake_case ):
raise ValueError(f' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
__A : List[str] = line_index
while line_index < len(__snake_case ) and _should_continue(lines[line_index] , __snake_case ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__A : List[str] = lines[start_index:line_index]
return "".join(__snake_case )
lowercase__ : int = re.compile(r'''^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)''')
lowercase__ : List[str] = re.compile(r'''^\s*(\S+)->(\S+)(\s+.*|$)''')
lowercase__ : Union[str, Any] = re.compile(r'''<FILL\s+[^>]*>''')
def _lowerCAmelCase ( __snake_case : Tuple ) -> Tuple:
__A : List[Any] = code.split('\n' )
__A : List[Any] = 0
while idx < len(__snake_case ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__snake_case ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _lowerCAmelCase ( __snake_case : Union[str, Any] ) -> Tuple:
__A : Optional[int] = len(get_indent(__snake_case ) ) > 0
if has_indent:
__A : List[str] = f'class Bla:\n{code}'
__A : Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=__snake_case )
__A : List[Any] = black.format_str(__snake_case , mode=__snake_case )
__A ,__A : int = style_docstrings_in_code(__snake_case )
return result[len('class Bla:\n' ) :] if has_indent else result
def _lowerCAmelCase ( __snake_case : List[Any] , __snake_case : Union[str, Any]=False ) -> Union[str, Any]:
with open(__snake_case , 'r' , encoding='utf-8' , newline='\n' ) as f:
__A : Optional[int] = f.readlines()
__A : Union[str, Any] = []
__A : Any = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__snake_case ):
__A : Tuple = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
__A ,__A ,__A : Optional[int] = search.groups()
__A : List[str] = find_code_in_diffusers(__snake_case )
__A : Optional[Any] = get_indent(__snake_case )
__A : str = line_index + 1 if indent == theoretical_indent else line_index + 2
__A : str = theoretical_indent
__A : Tuple = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
__A : Tuple = True
while line_index < len(__snake_case ) and should_continue:
line_index += 1
if line_index >= len(__snake_case ):
break
__A : List[str] = lines[line_index]
__A : str = _should_continue(__snake_case , __snake_case ) and re.search(f'^{indent}# End copy' , __snake_case ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
__A : Optional[Any] = lines[start_index:line_index]
__A : List[str] = ''.join(__snake_case )
# Remove any nested `Copied from` comments to avoid circular copies
__A : Dict = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__snake_case ) is None]
__A : List[Any] = '\n'.join(__snake_case )
# Before comparing, use the `replace_pattern` on the original code.
if len(__snake_case ) > 0:
__A : Tuple = replace_pattern.replace('with' , '' ).split(',' )
__A : Any = [_re_replace_pattern.search(__snake_case ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
__A ,__A ,__A : Union[str, Any] = pattern.groups()
__A : Union[str, Any] = re.sub(__snake_case , __snake_case , __snake_case )
if option.strip() == "all-casing":
__A : Optional[int] = re.sub(obja.lower() , obja.lower() , __snake_case )
__A : Any = re.sub(obja.upper() , obja.upper() , __snake_case )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
__A : Any = blackify(lines[start_index - 1] + theoretical_code )
__A : List[Any] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
__A : Dict = lines[:start_index] + [theoretical_code] + lines[line_index:]
__A : int = start_index + 1
if overwrite and len(__snake_case ) > 0:
# Warn the user a file has been modified.
print(f'Detected changes, rewriting {filename}.' )
with open(__snake_case , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(__snake_case )
return diffs
def _lowerCAmelCase ( __snake_case : bool = False ) -> List[str]:
__A : int = glob.glob(os.path.join(__snake_case , '**/*.py' ) , recursive=__snake_case )
__A : Union[str, Any] = []
for filename in all_files:
__A : List[str] = is_copy_consistent(__snake_case , __snake_case )
diffs += [f'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(__snake_case ) > 0:
__A : List[Any] = '\n'.join(__snake_case )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
lowercase__ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
lowercase__ : List[str] = parser.parse_args()
check_copies(args.fix_and_overwrite)
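The core of the checker is a regex that spots "# Copied from" markers; the referenced object is then re-extracted from the source tree and diffed against the annotated block. A stripped-down sketch of the detection pass, with a slightly generalized marker pattern of my own:

import re

# Matches e.g. "    # Copied from diffusers.models.attention.Attention with A->B"
COPY_MARKER = re.compile(r"^(\s*)#\s*Copied from\s+(\S+\.\S+)\s*($|\S.*$)")


def find_copy_markers(source: str) -> list[tuple[int, str]]:
    """Return (line_number, referenced_object) for every copy marker."""
    hits = []
    for lineno, line in enumerate(source.splitlines(), start=1):
        match = COPY_MARKER.search(line)
        if match:
            hits.append((lineno, match.group(2)))
    return hits


sample = "class A:\n    # Copied from diffusers.models.attention.Attention\n    pass\n"
print(find_copy_markers(sample))  # [(2, 'diffusers.models.attention.Attention')]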
| 190
|
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class SCREAMING_SNAKE_CASE (a__ ):
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = tempfile.mkdtemp()
__A : str = 5
# Realm tok
__A : Union[str, Any] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
__A : Union[str, Any] = os.path.join(self.tmpdirname , 'realm_tokenizer')
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
__A : Tuple = os.path.join(_UpperCAmelCase , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
__A : int = os.path.join(self.tmpdirname , 'realm_block_records')
os.makedirs(_UpperCAmelCase , exist_ok=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , 'realm_tokenizer'))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = RealmConfig(num_block_records=self.num_block_records)
return config
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Dict = Dataset.from_dict(
{
'id': ['0', '1'],
'question': ['foo', 'bar'],
'answers': [['Foo', 'Bar'], ['Bar']],
})
return dataset
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Any = np.array(
[
B'This is the first record',
B'This is the second record',
B'This is the third record',
B'This is the fourth record',
B'This is the fifth record',
B'This is a longer longer longer record',
] , dtype=_UpperCAmelCase , )
return block_records
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : List[Any] = self.get_config()
__A : str = self.get_dummy_retriever()
__A : List[str] = retriever.tokenizer
__A : Dict = np.array([0, 3] , dtype='long')
__A : Dict = tokenizer(['Test question']).input_ids
__A : Optional[Any] = tokenizer(
['the fourth'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
__A : str = config.reader_seq_len
__A ,__A ,__A ,__A : List[str] = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np')
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(len(_UpperCAmelCase) , 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10))
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10))
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10))
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10))
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'first', 'record', '[SEP]'] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]) , ['[CLS]', 'test', 'question', '[SEP]', 'this', 'is', 'the', 'fourth', 'record', '[SEP]'] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : int = self.get_config()
__A : Any = self.get_dummy_retriever()
__A : str = retriever.tokenizer
__A : Dict = np.array([0, 3, 5] , dtype='long')
__A : Tuple = tokenizer(['Test question']).input_ids
__A : Union[str, Any] = tokenizer(
['the fourth', 'longer longer'] , add_special_tokens=_UpperCAmelCase , return_token_type_ids=_UpperCAmelCase , return_attention_mask=_UpperCAmelCase , ).input_ids
__A : Dict = config.reader_seq_len
__A ,__A ,__A ,__A : str = retriever(
_UpperCAmelCase , _UpperCAmelCase , answer_ids=_UpperCAmelCase , max_length=_UpperCAmelCase , return_tensors='np')
self.assertEqual([False, True, True] , _UpperCAmelCase)
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , _UpperCAmelCase)
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
# Test local path
__A : str = retriever.from_pretrained(os.path.join(self.tmpdirname , 'realm_block_records'))
self.assertEqual(retriever.block_records[0] , B'This is the first record')
# Test mocked remote path
with patch('transformers.models.realm.retrieval_realm.hf_hub_download') as mock_hf_hub_download:
__A : int = os.path.join(
os.path.join(self.tmpdirname , 'realm_block_records') , _REALM_BLOCK_RECORDS_FILENAME)
__A : Tuple = RealmRetriever.from_pretrained('google/realm-cc-news-pretrained-openqa')
self.assertEqual(retriever.block_records[0] , B'This is the first record')
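The final test above swaps out the hub download with unittest.mock.patch so that from_pretrained never touches the network. The same pattern in isolation, with a hypothetical Retriever class of my own rather than the real one:

from unittest.mock import patch


class Retriever:
    def load(self, repo_id: str) -> bytes:
        # In the real class this would call a hub download helper.
        raise RuntimeError("network access not available in tests")


with patch.object(Retriever, "load", return_value=b"This is the first record") as mock_load:
    retriever = Retriever()
    assert retriever.load("org/dataset") == b"This is the first record"
    mock_load.assert_called_once_with("org/dataset")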
| 190
| 1
|
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Tuple=100 , UpperCamelCase : List[Any]=13 , UpperCamelCase : List[str]=30 , UpperCamelCase : List[str]=2 , UpperCamelCase : Dict=3 , UpperCamelCase : List[str]=True , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Tuple=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Optional[int]=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : int=10 , UpperCamelCase : List[str]=0.02 , UpperCamelCase : List[str]=3 , ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Any = vocab_size
__UpperCAmelCase : List[Any] = batch_size
__UpperCAmelCase : Dict = image_size
__UpperCAmelCase : Tuple = patch_size
__UpperCAmelCase : List[str] = num_channels
__UpperCAmelCase : Any = is_training
__UpperCAmelCase : Optional[int] = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Optional[Any] = num_hidden_layers
__UpperCAmelCase : Union[str, Any] = num_attention_heads
__UpperCAmelCase : Optional[Any] = intermediate_size
__UpperCAmelCase : Dict = hidden_act
__UpperCAmelCase : Optional[int] = hidden_dropout_prob
__UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
__UpperCAmelCase : int = type_sequence_label_size
__UpperCAmelCase : Optional[Any] = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__UpperCAmelCase : Any = (image_size // patch_size) ** 2
__UpperCAmelCase : Tuple = num_patches + 1
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Optional[int] = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase : Optional[Any] = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : str , UpperCamelCase : Any ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = FlaxBeitModel(config=UpperCamelCase )
__UpperCAmelCase : Optional[Any] = model(UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : str ):
'''simple docstring'''
__UpperCAmelCase : int = FlaxBeitForMaskedImageModeling(config=UpperCamelCase )
__UpperCAmelCase : List[str] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.type_sequence_label_size
__UpperCAmelCase : str = FlaxBeitForImageClassification(config=UpperCamelCase )
__UpperCAmelCase : Optional[int] = model(UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__UpperCAmelCase : Optional[Any] = 1
__UpperCAmelCase : Optional[int] = FlaxBeitForImageClassification(UpperCamelCase )
__UpperCAmelCase : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__UpperCAmelCase : int = model(UpperCamelCase )
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[str] = config_and_inputs

__UpperCAmelCase : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class lowerCamelCase__ ( A , unittest.TestCase ):
"""simple docstring"""
__a = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : List[str] = FlaxBeitModelTester(self )
__UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase , has_text_modality=UpperCamelCase , hidden_size=37 )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class(UpperCamelCase )
__UpperCAmelCase : Any = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
__UpperCAmelCase : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCAmelCase : Dict = self._prepare_for_class(UpperCamelCase , UpperCamelCase )
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase )
@jax.jit
def model_jitted(UpperCamelCase : Optional[Any] , **UpperCamelCase : Tuple ):
return model(pixel_values=UpperCamelCase , **UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
__UpperCAmelCase : str = model_jitted(**UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__UpperCAmelCase : Any = model_jitted(**UpperCamelCase ).to_tuple()
self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) )
for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase )
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase )
def lowerCamelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase )
@slow
def lowerCamelCase__ ( self : Any ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__UpperCAmelCase : Tuple = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
__UpperCAmelCase : Dict = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(UpperCamelCase )
def lowerCamelCase ( ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
__UpperCAmelCase : Optional[int] = self.default_image_processor
__UpperCAmelCase : Tuple = prepare_img()
__UpperCAmelCase : str = image_processor(images=UpperCamelCase , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
__UpperCAmelCase : Union[str, Any] = np.ones((1, 196) , dtype=UpperCamelCase )
# forward pass
__UpperCAmelCase : Tuple = model(pixel_values=UpperCamelCase , bool_masked_pos=UpperCamelCase )
__UpperCAmelCase : Dict = outputs.logits
# verify the logits
__UpperCAmelCase : Any = (1, 196, 8_192)
self.assertEqual(logits.shape , UpperCamelCase )
__UpperCAmelCase : Any = np.array(
[[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , UpperCamelCase , atol=1e-2 ) )
@slow
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Any = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
__UpperCAmelCase : int = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=UpperCamelCase , return_tensors="""np""" )
# forward pass
__UpperCAmelCase : Any = model(**UpperCamelCase )
__UpperCAmelCase : Tuple = outputs.logits
# verify the logits
__UpperCAmelCase : Optional[int] = (1, 1_000)
self.assertEqual(logits.shape , UpperCamelCase )
__UpperCAmelCase : str = np.array([-1.2385, -1.0987, -1.0108] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
__UpperCAmelCase : int = 281
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase )
@slow
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : str = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
__UpperCAmelCase : Dict = self.default_image_processor
__UpperCAmelCase : Dict = prepare_img()
__UpperCAmelCase : List[Any] = image_processor(images=UpperCamelCase , return_tensors="""np""" )
# forward pass
__UpperCAmelCase : str = model(**UpperCamelCase )
__UpperCAmelCase : Optional[int] = outputs.logits
# verify the logits
__UpperCAmelCase : List[Any] = (1, 21_841)
self.assertEqual(logits.shape , UpperCamelCase )
__UpperCAmelCase : Tuple = np.array([1.6881, -0.2787, 0.5901] )
self.assertTrue(np.allclose(logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
__UpperCAmelCase : str = 2_396
self.assertEqual(logits.argmax(-1 ).item() , UpperCamelCase )
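A pattern worth isolating from the JIT test above: run the same function compiled and uncompiled and assert that the outputs agree. A toy sketch, assuming only that jax is installed; the forward function is a stand-in, not the BeiT model:

import jax
import jax.numpy as jnp


def forward(x):
    # Toy stand-in for a model forward pass.
    return jnp.tanh(x) * 2.0


jitted = jax.jit(forward)
x = jnp.ones((2, 3))

with jax.disable_jit():
    eager_out = forward(x)
compiled_out = jitted(x)

assert jnp.allclose(eager_out, compiled_out)  # same values either way
assert eager_out.shape == compiled_out.shape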
| 357
|
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCAmelCase : Optional[Any] = 'scheduler_config.json'
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = 1
__a = 2
__a = 3
__a = 4
__a = 5
__a = 6
__a = 7
__a = 8
__a = 9
__a = 10
__a = 11
__a = 12
__a = 13
__a = 14
@dataclass
class lowerCamelCase__ ( A ):
"""simple docstring"""
__a = 42
class lowerCamelCase__ :
"""simple docstring"""
__a = SCHEDULER_CONFIG_NAME
__a = []
__a = True
@classmethod
def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ):
'''simple docstring'''
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , )
return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase )
def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ):
'''simple docstring'''
self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase )
@property
def lowerCamelCase__ ( self : List[Any] ):
'''simple docstring'''
return self._get_compatibles()
@classmethod
def lowerCamelCase__ ( cls : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] )
__UpperCAmelCase : List[str] = [
getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase )
]
return compatible_classes
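The compatibles lookup reduces to: import the package's root module, then getattr each name that actually exists there. That resolution step in isolation, using stdlib names purely as an example:

import importlib


def resolve_classes(module_name: str, class_names: list[str]) -> list[type]:
    """Map name strings to real objects, silently skipping missing ones."""
    module = importlib.import_module(module_name)
    return [getattr(module, name) for name in class_names if hasattr(module, name)]


# stdlib example: 'NoSuchThing' is dropped rather than raising AttributeError
print(resolve_classes("collections", ["OrderedDict", "Counter", "NoSuchThing"]))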
| 320
| 0
|
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A__ : List[Any] = """sshleifer/bart-tiny-random"""
A__ : List[Any] = """patrickvonplaten/t5-tiny-random"""
@require_torch
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return AutoConfig.from_pretrained(UpperCAmelCase__ )
def UpperCAmelCase__ ( self : List[str] ):
lowerCamelCase_ , *lowerCamelCase_ : int =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def UpperCAmelCase__ ( self : int ):
lowerCamelCase_ , *lowerCamelCase_ : List[Any] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__ )
def UpperCAmelCase__ ( self : Union[str, Any] ):
lowerCamelCase_ , *lowerCamelCase_ : Union[str, Any] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCAmelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def UpperCAmelCase__ ( self : Dict ):
lowerCamelCase_ , *lowerCamelCase_ : Optional[int] =create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def UpperCAmelCase__ ( self : str ):
with self.assertRaises(UpperCAmelCase__ ):
create_student_by_copying_alternating_layers(UpperCAmelCase__ , tempfile.mkdtemp() , e=UpperCAmelCase__ , d=UpperCAmelCase__ )
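The helper under test copies a subset of teacher layers into a smaller student. Independent of its real table-driven implementation, the even-spacing arithmetic behind "alternating layers" looks roughly like this; this is my sketch, not the library's code:

def pick_layer_indices(teacher_layers: int, student_layers: int) -> list[int]:
    """Spread `student_layers` picks as evenly as possible over the teacher."""
    step = teacher_layers / student_layers
    return [round(i * step) for i in range(student_layers)]


print(pick_layer_indices(12, 3))  # [0, 4, 8]
print(pick_layer_indices(6, 6))   # [0, 1, 2, 3, 4, 5]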
| 144
|
from ...processing_utils import ProcessorMixin
class UpperCamelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
UpperCAmelCase__ = '''SpeechT5FeatureExtractor'''
UpperCAmelCase__ = '''SpeechT5Tokenizer'''
def __init__( self : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().__init__(UpperCAmelCase__ , UpperCAmelCase__)
def __call__( self : Dict , *UpperCAmelCase__ : List[str] , **UpperCAmelCase__ : Any) ->Optional[Any]:
'''simple docstring'''
A__ = kwargs.pop('''audio''' , UpperCAmelCase__)
A__ = kwargs.pop('''text''' , UpperCAmelCase__)
A__ = kwargs.pop('''text_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''audio_target''' , UpperCAmelCase__)
A__ = kwargs.pop('''sampling_rate''' , UpperCAmelCase__)
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''')
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''')
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''')
if audio is not None:
A__ = self.feature_extractor(UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
elif text is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if audio_target is not None:
A__ = self.feature_extractor(audio_target=UpperCAmelCase__ , *UpperCAmelCase__ , sampling_rate=UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_values''']
elif text_target is not None:
A__ = self.tokenizer(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : int) ->Optional[int]:
'''simple docstring'''
A__ = kwargs.pop('''input_values''' , UpperCAmelCase__)
A__ = kwargs.pop('''input_ids''' , UpperCAmelCase__)
A__ = kwargs.pop('''labels''' , UpperCAmelCase__)
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''')
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''')
if input_values is not None:
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
elif input_ids is not None:
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
else:
A__ = None
if labels is not None:
if "input_ids" in labels or (isinstance(UpperCAmelCase__ , UpperCAmelCase__) and "input_ids" in labels[0]):
A__ = self.tokenizer.pad(UpperCAmelCase__ , **UpperCAmelCase__)
A__ = targets['''input_ids''']
else:
A__ = self.feature_extractor.feature_size
A__ = self.feature_extractor.num_mel_bins
A__ = self.feature_extractor.pad(UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__)
A__ = feature_size_hack
A__ = targets['''input_values''']
else:
A__ = None
if inputs is None:
return targets
if targets is not None:
A__ = labels
A__ = targets.get('''attention_mask''')
if decoder_attention_mask is not None:
A__ = decoder_attention_mask
return inputs
def SCREAMING_SNAKE_CASE ( self : Any , *UpperCAmelCase__ : Dict , **UpperCAmelCase__ : Optional[Any]) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__)
def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Union[str, Any]) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__)
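Boiled down, the processor's __call__ is a mutual-exclusion dispatcher: exactly one of the input kinds may be provided, and the matching sub-processor handles it. The branching contract as a toy sketch, with stand-in callables in place of the feature extractor and tokenizer:

def process(audio=None, text=None, extract=None, tokenize=None):
    """Route to the feature extractor or the tokenizer, never both."""
    if audio is not None and text is not None:
        raise ValueError("Cannot process both `audio` and `text` inputs.")
    if audio is None and text is None:
        raise ValueError("You need to specify an `audio` or `text` input.")
    return extract(audio) if audio is not None else tokenize(text)


# Toy stand-ins for the two sub-processors:
print(process(text="hello world", extract=len, tokenize=str.split))  # ['hello', 'world']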
| 14
| 0
|
def lowerCAmelCase_ ( __A ) -> tuple[int, int]:
'''simple docstring'''
try:
UpperCAmelCase__ = float(__A )
except ValueError:
raise ValueError("Please enter a valid number" )
UpperCAmelCase__ = decimal - int(__A )
if fractional_part == 0:
return int(__A ), 1
else:
UpperCAmelCase__ = len(str(__A ).split("." )[1] )
UpperCAmelCase__ = int(decimal * (10**number_of_frac_digits) )
UpperCAmelCase__ = 10**number_of_frac_digits
UpperCAmelCase__ , UpperCAmelCase__ = denominator, numerator
while True:
UpperCAmelCase__ = dividend % divisor
if remainder == 0:
break
UpperCAmelCase__ , UpperCAmelCase__ = divisor, remainder
UpperCAmelCase__ , UpperCAmelCase__ = numerator / divisor, denominator / divisor
return int(__A ), int(__A )
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(8_9.0) = }''')
print(f'''{decimal_to_fraction("67") = }''')
print(f'''{decimal_to_fraction("45.0") = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction("6.25") = }''')
print(f'''{decimal_to_fraction("78td") = }''')
| 143
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
UpperCamelCase__ = {'tokenization_bertweet': ['BertweetTokenizer']}
if TYPE_CHECKING:
from .tokenization_bertweet import BertweetTokenizer
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
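The _LazyModule indirection defers the heavy import until an attribute is first accessed. The same effect can be sketched with a module-level __getattr__ (PEP 562), assuming the file lives inside a package; the mapping below is illustrative:

import importlib

_LAZY = {"BertweetTokenizer": "tokenization_bertweet"}  # attribute -> submodule


def __getattr__(name: str):
    # Invoked only when `name` is missing from this module's globals.
    if name in _LAZY:
        module = importlib.import_module("." + _LAZY[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")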
| 143
| 1
|
"""simple docstring"""
lowerCAmelCase_ = [
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 16
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """philschmid/bart-large-cnn-samsum"""
_SCREAMING_SNAKE_CASE = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
_SCREAMING_SNAKE_CASE = """summarizer"""
_SCREAMING_SNAKE_CASE = AutoTokenizer
_SCREAMING_SNAKE_CASE = AutoModelForSeqaSeqLM
_SCREAMING_SNAKE_CASE = ["""text"""]
_SCREAMING_SNAKE_CASE = ["""text"""]
def lowercase_ ( self : Optional[Any], _snake_case : str ) ->Any:
return self.pre_processor(_snake_case, return_tensors='pt', truncation=_snake_case )
def lowercase_ ( self : int, _snake_case : List[Any] ) ->Any:
return self.model.generate(**_snake_case )[0]
def lowercase_ ( self : int, _snake_case : int ) ->str:
return self.pre_processor.decode(_snake_case, skip_special_tokens=_snake_case, clean_up_tokenization_spaces=_snake_case )
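A PipelineTool call factors into encode, forward, and decode, which the class above fills with a tokenizer, a seq2seq generate, and a decode step. The three-stage shape reduced to plain Python, with a toy "model" in the middle:

class ThreeStageTool:
    """Sketch of the encode -> forward -> decode split used by pipeline tools."""

    def encode(self, text: str) -> list[str]:
        return text.lower().split()

    def forward(self, tokens: list[str]) -> list[str]:
        return tokens[:3]  # toy "model": keep the first three tokens

    def decode(self, tokens: list[str]) -> str:
        return " ".join(tokens)

    def __call__(self, text: str) -> str:
        return self.decode(self.forward(self.encode(text)))


print(ThreeStageTool()("This Is A Long Input Text"))  # "this is a"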
| 277
| 0
|
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _snake_case (_lowercase , unittest.TestCase):
# TODO: is there an appropriate internal test set?
__A : List[Any] ="ssube/stable-diffusion-x4-upscaler-onnx"
def UpperCamelCase__ ( self ,_snake_case=0 ):
UpperCAmelCase_ : List[str] = floats_tensor((1, 3, 1_28, 1_28) ,rng=random.Random(__UpperCamelCase ) )
UpperCAmelCase_ : str = torch.manual_seed(__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase_ : Optional[Any] = pipe(**__UpperCamelCase ).images
UpperCAmelCase_ : Union[str, Any] = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : List[str] = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
UpperCAmelCase_ : List[str] = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : Dict = self.get_dummy_inputs()
UpperCAmelCase_ : Optional[int] = pipe(**__UpperCamelCase ).images
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array(
[0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
UpperCAmelCase_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs()
UpperCAmelCase_ : List[str] = pipe(**__UpperCamelCase ).images
UpperCAmelCase_ : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Dict = np.array(
[0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Any = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
UpperCAmelCase_ : Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs()
UpperCAmelCase_ : Union[str, Any] = pipe(**__UpperCamelCase ).images
UpperCAmelCase_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : int = np.array(
[0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider="CPUExecutionProvider" )
UpperCAmelCase_ : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : Optional[int] = self.get_dummy_inputs()
UpperCAmelCase_ : str = pipe(**__UpperCamelCase ).images
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : int = np.array(
[0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class _snake_case (unittest.TestCase):
@property
def UpperCamelCase__ ( self ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = ort.SessionOptions()
UpperCAmelCase_ : Union[str, Any] = False
return options
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ : List[str] = init_image.resize((1_28, 1_28) )
# using the PNDM scheduler by default
UpperCAmelCase_ : List[Any] = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : str = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase_ : Optional[int] = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=__UpperCamelCase ,output_type="np" ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Union[str, Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Optional[int] = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_ : List[Any] = init_image.resize((1_28, 1_28) )
UpperCAmelCase_ : int = LMSDiscreteScheduler.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" ,subfolder="scheduler" )
UpperCAmelCase_ : str = OnnxStableDiffusionUpscalePipeline.from_pretrained(
"ssube/stable-diffusion-x4-upscaler-onnx" ,scheduler=__UpperCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__UpperCamelCase )
UpperCAmelCase_ : Dict = "A fantasy landscape, trending on artstation"
UpperCAmelCase_ : List[str] = torch.manual_seed(0 )
UpperCAmelCase_ : int = pipe(
prompt=__UpperCamelCase ,image=__UpperCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=__UpperCamelCase ,output_type="np" ,)
UpperCAmelCase_ : Any = output.images
UpperCAmelCase_ : Optional[Any] = images[0, 2_55:2_58, 3_83:3_86, -1]
assert images.shape == (1, 5_12, 5_12, 3)
UpperCAmelCase_ : Union[str, Any] = np.array(
[0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
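Each test above follows the same reproducibility recipe: pin a seed, take a small slice of the output, and compare it to a stored reference within a tolerance. The assertion skeleton by itself; the reference values here are synthetic, not the pipeline's:

import numpy as np

rng = np.random.default_rng(0)  # stands in for torch.manual_seed in the tests
image = rng.random((1, 512, 512, 3))

image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = image_slice.copy()  # real tests hard-code this 9-value array

assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice - expected_slice).max() < 1e-1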
| 360
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ) -> str | Literal[False]:
"""simple docstring"""
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if lista[i] != lista[i]:
count += 1
UpperCAmelCase_ : List[str] = "_"
if count > 1:
return False
else:
return "".join(_SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : list[str] ) -> list[str]:
"""simple docstring"""
UpperCAmelCase_ : List[str] = []
while True:
UpperCAmelCase_ : Any = ["$"] * len(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = []
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
for j in range(i + 1 , len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Optional[int] = compare_string(binary[i] , binary[j] )
if k is False:
UpperCAmelCase_ : Union[str, Any] = "*"
UpperCAmelCase_ : List[Any] = "*"
temp.append("X" )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(_SCREAMING_SNAKE_CASE ) == 0:
return pi
UpperCAmelCase_ : str = list(set(_SCREAMING_SNAKE_CASE ) )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Sequence[float] ) -> list[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = []
for minterm in minterms:
UpperCAmelCase_ : Optional[Any] = ""
for _ in range(_SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = str(minterm % 2 ) + string
minterm //= 2
temp.append(_SCREAMING_SNAKE_CASE )
return temp
def a__ ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ) -> bool:
"""simple docstring"""
UpperCAmelCase_ : Tuple = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : int = list(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def a__ ( _SCREAMING_SNAKE_CASE : list[list[int]] , _SCREAMING_SNAKE_CASE : list[str] ) -> list[str]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
UpperCAmelCase_ : str = [0] * len(_SCREAMING_SNAKE_CASE )
for i in range(len(chart[0] ) ):
UpperCAmelCase_ : Optional[Any] = 0
UpperCAmelCase_ : Optional[int] = -1
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
if chart[j][i] == 1:
count += 1
UpperCAmelCase_ : Any = j
if count == 1:
UpperCAmelCase_ : Any = 1
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : int = 0
temp.append(prime_implicants[i] )
while True:
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : List[str] = -1
UpperCAmelCase_ : int = 0
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : str = chart[i].count(1 )
if count_n > max_n:
UpperCAmelCase_ : List[str] = count_n
UpperCAmelCase_ : Dict = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : int = 0
def a__ ( _SCREAMING_SNAKE_CASE : list[str] , _SCREAMING_SNAKE_CASE : list[str] ) -> list[list[int]]:
"""simple docstring"""
UpperCAmelCase_ : Dict = [[0 for x in range(len(_SCREAMING_SNAKE_CASE ) )] for x in range(len(_SCREAMING_SNAKE_CASE ) )]
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCAmelCase_ : Optional[int] = prime_implicants[i].count("_" )
for j in range(len(_SCREAMING_SNAKE_CASE ) ):
if is_for_table(prime_implicants[i] , binary[j] , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : str = 1
return chart
def a__ ( ) -> None:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = int(input("Enter the no. of variables\n" ) )
UpperCAmelCase_ : Tuple = [
float(_SCREAMING_SNAKE_CASE )
for x in input(
"Enter the decimal representation of Minterms 'Spaces Separated'\n" ).split()
]
UpperCAmelCase_ : int = decimal_to_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = check(_SCREAMING_SNAKE_CASE )
print("Prime Implicants are:" )
print(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[Any] = prime_implicant_chart(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = selection(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print("Essential Prime Implicants are:" )
print(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
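The heart of the Quine-McCluskey pass above (compare_string) merges two terms that differ in exactly one bit, masking that bit with '_'. The rule in isolation; my variant returns None where the snippet returns False:

def merge_if_adjacent(a: str, b: str):
    """Merge two equal-length binary terms differing in exactly one bit."""
    diff = [i for i in range(len(a)) if a[i] != b[i]]
    if len(diff) != 1:
        return None
    i = diff[0]
    return a[:i] + "_" + a[i + 1 :]


print(merge_if_adjacent("0110", "0100"))  # 01_0
print(merge_if_adjacent("0110", "1001"))  # None: all four bits differ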
| 67
| 0
|
'''simple docstring'''
def _lowerCamelCase ( lowercase : str ) -> list:
if n_term == "":
return []
_a = []
for temp in range(int(lowercase ) ):
series.append(F'1/{temp + 1}' if series else "1" )
return series
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
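The generator above builds the terms as strings; the numeric partial sum H_n = 1 + 1/2 + ... + 1/n is a one-liner, and with Fraction it stays exact. A quick sketch:

from fractions import Fraction


def harmonic_sum(n: int) -> Fraction:
    # Exact n-th harmonic number H_n.
    return sum(Fraction(1, k) for k in range(1, n + 1))


print(harmonic_sum(4))         # 25/12
print(float(harmonic_sum(4)))  # 2.0833...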
| 63
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a_ = abspath(join(dirname(dirname(__file__)), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def lowerCamelCase__ ( _a):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(_a)
def lowerCamelCase__ ( _a):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
SCREAMING_SNAKE_CASE : Union[str, Any] = terminalreporter.config.getoption("--make-reports")
if make_reports:
pytest_terminal_summary_main(_a , id=_a)
| 76
| 0
|
"""simple docstring"""
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ):
"""simple docstring"""
super().__init__()
UpperCAmelCase__ : Tuple = pad_token_id
UpperCAmelCase__ : Any = max_length
UpperCAmelCase__ : str = vocab
UpperCAmelCase__ : Union[str, Any] = merges
UpperCAmelCase__ : Tuple = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()]
UpperCAmelCase__ : Tuple = tokenizer.get_vocab()
return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase )
@classmethod
def _a (cls , _lowerCamelCase ):
"""simple docstring"""
return cls(**_lowerCamelCase )
def _a (self ):
"""simple docstring"""
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _a (self , _lowerCamelCase , _lowerCamelCase = None ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = self.tf_tokenizer(_lowerCamelCase )
UpperCAmelCase__ : Optional[Any] = tf.ones_like(_lowerCamelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
UpperCAmelCase__ : Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
UpperCAmelCase__ , UpperCAmelCase__ : str = pad_model_inputs(
_lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 166
|
"""simple docstring"""
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
_A = """__DUMMY_TRANSFORMERS_USER__"""
_A = """Dummy User"""
_A = """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"""
_A = """https://hub-ci.huggingface.co"""
_A = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}"""
_A = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}"""
_A = Path("""~/.huggingface/hub_ci_token""").expanduser()
@pytest.fixture
def a__ ( lowerCAmelCase ) -> Union[str, Any]:
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , lowerCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , lowerCAmelCase )
@pytest.fixture
def a__ ( lowerCAmelCase ) -> List[Any]:
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , lowerCAmelCase )
@pytest.fixture
def a__ ( ci_hub_config , ci_hfh_hf_hub_url ) -> str:
    HfFolder.save_token(CI_HUB_USER_TOKEN )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def a__ ( ) -> List[str]:
    return HfApi(endpoint=CI_HUB_ENDPOINT )
@pytest.fixture(scope="""session""" )
def a__ ( hf_api ) -> Union[str, Any]:
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN )
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token )
@pytest.fixture
def a__ ( hf_api ) -> List[str]:
    def _cleanup_repo(repo_id ):
        hf_api.delete_repo(repo_id , token=CI_HUB_USER_TOKEN , repo_type="""dataset""" )
    return _cleanup_repo
@pytest.fixture
def a__ ( cleanup_repo ) -> Optional[Any]:
    @contextmanager
    def _temporary_repo(repo_id ):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id )
    return _temporary_repo
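# --- usage sketch (added for illustration) ---
# The `_temporary_repo` context manager returned by the fixture above is
# typically consumed like this inside a test (fixture names are assumptions):
#
#     def test_push_to_hub(temporary_repo, hf_api, hf_token):
#         with temporary_repo(f"{CI_HUB_USER}/my_test_repo") as repo_id:
#             hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset")
#             ...  # the repo is cleaned up automatically when the block exits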
@pytest.fixture(scope="""session""" )
def a__ ( hf_api , hf_token , text_file ) -> Union[str, Any]:
    repo_name = F"""repo_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(text_file ) , path_in_repo="""data/text_data.txt""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def a__ ( hf_private_dataset_repo_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> List[Any]:
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( hf_api , hf_token , zip_csv_with_dir_path ) -> int:
    repo_name = F"""repo_zipped_txt_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_csv_with_dir_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def a__ ( hf_private_dataset_repo_zipped_txt_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> Dict:
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def a__ ( hf_api , hf_token , zip_image_path ) -> Tuple:
    repo_name = F"""repo_zipped_img_data-{int(time.time() * 10E3 )}"""
    repo_id = F"""{CI_HUB_USER}/{repo_name}"""
    hf_api.create_repo(repo_id , token=hf_token , repo_type="""dataset""" , private=True )
    hf_api.upload_file(
        token=hf_token , path_or_fileobj=str(zip_image_path ) , path_in_repo="""data.zip""" , repo_id=repo_id , repo_type="""dataset""" , )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id , token=hf_token , repo_type="""dataset""" )
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass
@pytest.fixture()
def a__ ( hf_private_dataset_repo_zipped_img_data_ , ci_hub_config , ci_hfh_hf_hub_url ) -> Optional[Any]:
    return hf_private_dataset_repo_zipped_img_data_
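# --- illustrative note (added) ---
# The trailing-underscore fixtures above are session-scoped, so each private
# repo is created only once per test session; the thin per-test wrappers
# re-expose them after the hub endpoint has been monkeypatched, e.g.:
#
#     def test_load_private_txt(hf_private_dataset_repo_txt_data):
#         repo_id = hf_private_dataset_repo_txt_data  # "<user>/repo_txt_data-<timestamp>"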
| 166
| 1
|
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
'''simple docstring'''
    def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2, ) -> Optional[int]:
        """simple docstring"""
        self.parent =parent
        self.batch_size =batch_size
        self.image_size =image_size
        self.patch_size =patch_size
        self.num_channels =num_channels
        self.is_training =is_training
        self.use_labels =use_labels
        self.hidden_size =hidden_size
        self.num_hidden_layers =num_hidden_layers
        self.num_attention_heads =num_attention_heads
        self.intermediate_size =intermediate_size
        self.hidden_act =hidden_act
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.type_sequence_label_size =type_sequence_label_size
        self.initializer_range =initializer_range
        self.scope =scope
        self.encoder_stride =encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches =(image_size // patch_size) ** 2
        self.seq_length =num_patches + 2
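        # worked example (illustrative): with the defaults above, image_size=30
        # and patch_size=2 give num_patches = (30 // 2) ** 2 = 225, so
        # seq_length = 225 + 2 = 227.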
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels =None
        if self.use_labels:
            labels =ids_tensor([self.batch_size], self.type_sequence_label_size )
        config =self.get_config()
        return config, pixel_values, labels
def _UpperCAmelCase ( self ) -> Any:
"""simple docstring"""
return DeiTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, )
    def _UpperCAmelCase ( self, config, pixel_values, labels ) -> Optional[int]:
        """simple docstring"""
        model =DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCAmelCase ( self, config, pixel_values, labels ) -> Optional[Any]:
        """simple docstring"""
        model =DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels =1
        model =DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result =model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size) )
    def _UpperCAmelCase ( self, config, pixel_values, labels ) -> Dict:
        """simple docstring"""
        config.num_labels =self.type_sequence_label_size
        model =DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels =1
        model =DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result =model(pixel_values, labels=labels )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase ( self ) -> Dict:
"""simple docstring"""
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DeiTModel,
            'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
        self.model_tester =DeiTModelTester(self )
        self.config_tester =ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37 )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
pass
def _UpperCAmelCase ( self ) -> List[Any]:
"""simple docstring"""
        config, inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model =model_class(config )
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
            x =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear ) )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        config, inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model =model_class(config )
            signature =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names =[*signature.parameters.keys()]
            expected_arg_names =['''pixel_values''']
            self.assertListEqual(arg_names[:1], expected_arg_names )
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def _UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def _UpperCAmelCase ( self, inputs_dict, model_class, return_labels=False ) -> Dict:
        """simple docstring"""
        inputs_dict =super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
def _UpperCAmelCase ( self ) -> int:
"""simple docstring"""
        if not self.model_tester.is_training:
            return
        config, inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict =True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model =model_class(config )
            model.to(torch_device )
            model.train()
            inputs =self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss =model(**inputs ).loss
            loss.backward()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
        config, inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache =False
        config.return_dict =True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model =model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs =self._prepare_for_class(inputs_dict, model_class, return_labels=True )
            loss =model(**inputs ).loss
            loss.backward()
def _UpperCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
        config, inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        problem_types =[
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_MAPPING ),
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type['title']}""" ):
                    config.problem_type =problem_type['''title''']
                    config.num_labels =problem_type['''num_labels''']
                    model =model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs =self._prepare_for_class(inputs_dict, model_class, return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs['''labels'''] =inputs['''labels'''].unsqueeze(1 ).repeat(1, problem_type['''num_labels'''] )
                    inputs['''labels'''] =inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss =model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
@slow
def _UpperCAmelCase ( self ) -> str:
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model =DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCAmelCase__ ( ):
'''simple docstring'''
    image =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self ) -> Tuple:
"""simple docstring"""
        model =DeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' ).to(
            torch_device )
        image_processor =self.default_image_processor
        image =prepare_img()
        inputs =image_processor(images=image, return_tensors='''pt''' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs =model(**inputs )
        # verify the logits
        expected_shape =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape, expected_shape )
        expected_slice =torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def _UpperCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        model =DeiTModel.from_pretrained(
            '''facebook/deit-base-distilled-patch16-224''', torch_dtype=torch.float16, device_map='''auto''' )
        image_processor =self.default_image_processor
        image =prepare_img()
        inputs =image_processor(images=image, return_tensors='''pt''' )
        pixel_values =inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs =model(pixel_values )
| 188
|
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__snake_case = logging.get_logger('''transformers.models.speecht5''')
__snake_case = {
'''speech_encoder_prenet.layer_norm''': '''speecht5.encoder.prenet.feature_projection.layer_norm''',
'''speech_encoder_prenet.post_extract_proj''': '''speecht5.encoder.prenet.feature_projection.projection''',
'''speech_encoder_prenet.pos_conv.0''': '''speecht5.encoder.prenet.pos_conv_embed.conv''',
'''speech_encoder_prenet.mask_emb''': '''speecht5.encoder.prenet.masked_spec_embed''',
}
__snake_case = {
'''text_encoder_prenet.encoder_prenet.0''': '''speecht5.encoder.prenet.embed_tokens''',
'''text_encoder_prenet.encoder_prenet.1.alpha''': '''speecht5.encoder.prenet.encode_positions.alpha''',
}
__snake_case = {
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0''': '''speecht5.decoder.prenet.layers.0''',
'''speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0''': '''speecht5.decoder.prenet.layers.1''',
'''speech_decoder_prenet.decoder_prenet.0.1''': '''speecht5.decoder.prenet.final_layer''',
'''speech_decoder_prenet.decoder_prenet.1.alpha''': '''speecht5.decoder.prenet.encode_positions.alpha''',
'''speech_decoder_prenet.spkembs_layer.0''': '''speecht5.decoder.prenet.speaker_embeds_layer''',
}
__snake_case = {
'''speech_decoder_postnet.feat_out''': '''speech_decoder_postnet.feat_out''',
'''speech_decoder_postnet.prob_out''': '''speech_decoder_postnet.prob_out''',
'''speech_decoder_postnet.postnet.postnet.0.0''': '''speech_decoder_postnet.layers.0.conv''',
'''speech_decoder_postnet.postnet.postnet.0.1''': '''speech_decoder_postnet.layers.0.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.1.0''': '''speech_decoder_postnet.layers.1.conv''',
'''speech_decoder_postnet.postnet.postnet.1.1''': '''speech_decoder_postnet.layers.1.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.2.0''': '''speech_decoder_postnet.layers.2.conv''',
'''speech_decoder_postnet.postnet.postnet.2.1''': '''speech_decoder_postnet.layers.2.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.3.0''': '''speech_decoder_postnet.layers.3.conv''',
'''speech_decoder_postnet.postnet.postnet.3.1''': '''speech_decoder_postnet.layers.3.batch_norm''',
'''speech_decoder_postnet.postnet.postnet.4.0''': '''speech_decoder_postnet.layers.4.conv''',
'''speech_decoder_postnet.postnet.postnet.4.1''': '''speech_decoder_postnet.layers.4.batch_norm''',
}
__snake_case = {
'''text_decoder_prenet.embed_tokens''': '''speecht5.decoder.prenet.embed_tokens''',
}
__snake_case = {
'''text_decoder_postnet.output_projection''': '''text_decoder_postnet.lm_head''',
}
__snake_case = {
'''encoder.layers.*.self_attn.k_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj''',
'''encoder.layers.*.self_attn.v_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj''',
'''encoder.layers.*.self_attn.q_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj''',
'''encoder.layers.*.self_attn.out_proj''': '''speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj''',
'''encoder.layers.*.self_attn_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.layer_norm''',
'''encoder.layers.*.fc1''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense''',
'''encoder.layers.*.fc2''': '''speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense''',
'''encoder.layers.*.final_layer_norm''': '''speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''speecht5.encoder.wrapped_encoder.layer_norm''',
'''encoder.pos_emb.pe_k''': '''speecht5.encoder.wrapped_encoder.embed_positions.pe_k''',
}
__snake_case = {
'''decoder.layers.*.self_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj''',
'''decoder.layers.*.self_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj''',
'''decoder.layers.*.self_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj''',
'''decoder.layers.*.self_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj''',
'''decoder.layers.*.self_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm''',
'''decoder.layers.*.encoder_attn.k_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj''',
'''decoder.layers.*.encoder_attn.v_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj''',
'''decoder.layers.*.encoder_attn.q_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj''',
'''decoder.layers.*.encoder_attn.out_proj''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj''',
'''decoder.layers.*.encoder_attn_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm''',
'''decoder.layers.*.fc1''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense''',
'''decoder.layers.*.fc2''': '''speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense''',
'''decoder.layers.*.final_layer_norm''': '''speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm''',
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__snake_case = []
__snake_case = [
'''encoder.version''',
'''encoder.layers.*.norm_k.weight''',
'''encoder.layers.*.norm_k.bias''',
'''decoder.version''',
'''decoder.layers.*.norm_k.weight''',
'''decoder.layers.*.norm_k.bias''',
'''decoder.pos_emb.pe_k''',
'''speech_encoder_prenet.embed_positions._float_tensor''',
'''text_decoder_prenet.embed_positions._float_tensor''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''speech_decoder_prenet.*''',
'''speech_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''speech_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
__snake_case = IGNORE_KEYS + [
'''encoder.proj''',
'''text_encoder_prenet.*''',
'''text_decoder_prenet.*''',
'''text_decoder_postnet.*''',
]
def set_recursively( hf_model , key , value , full_name , weight_type ):
    """simple docstring"""
    hf_pointer = hf_model
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def should_ignore( name , ignore_keys ):
    """simple docstring"""
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix , suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
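# --- illustrative examples (added; not in the original script) ---
# `should_ignore` supports exact substrings, "prefix.*" patterns and
# "prefix.*.suffix" patterns. With IGNORE_KEYS_T2S defined above:
#   should_ignore("speech_encoder_prenet.mask_emb", IGNORE_KEYS_T2S)  # True  ("speech_encoder_prenet.*")
#   should_ignore("encoder.proj", IGNORE_KEYS_T2S)                    # True  (exact substring)
#   should_ignore("encoder.layers.0.fc1", IGNORE_KEYS_T2S)            # False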
def recursively_load_weights( fairseq_dict , hf_model , task ):
    """simple docstring"""
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}' )
    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix , suffix = key.split('''.*.''' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(f'Unused weights: {unused_weights}' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_speechta_checkpoint( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """simple docstring"""
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 18_76
        config.max_text_positions = 6_00
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 18_76
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(f'Unknown task name: {task}' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )
    fairseq_checkpoint = torch.load(checkpoint_path )
    recursively_load_weights(fairseq_checkpoint['''model'''] , model , task )
    model.save_pretrained(pytorch_dump_folder_path )
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--task''',
default='''s2t''',
type=str,
help='''Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.''',
)
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--vocab_path''', default=None, type=str, help='''Path to SentencePiece model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
__snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
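# --- illustrative invocation (added; script filename and paths are placeholders) ---
# python convert_speecht5_checkpoint.py \
#     --task t2s \
#     --checkpoint_path ./speecht5_tts.pt \
#     --vocab_path ./spm_char.model \
#     --pytorch_dump_folder_path ./speecht5_tts_hf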
| 320
| 0
|
"""simple docstring"""
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ =logging.get_logger(__name__)
UpperCamelCase__ =[
['attention', 'attn'],
['encoder_attention', 'encoder_attn'],
['q_lin', 'q_proj'],
['k_lin', 'k_proj'],
['v_lin', 'v_proj'],
['out_lin', 'out_proj'],
['norm_embeddings', 'layernorm_embedding'],
['position_embeddings', 'embed_positions'],
['embeddings', 'embed_tokens'],
['ffn.lin', 'fc'],
]
def rename_state_dict_key(k ):
    if k == "embeddings.weight":
        return "shared.weight"
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name , hf_name )
    if k.startswith("encoder" ):
        k = k.replace(".attn" , ".self_attn" )
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "final_layer_norm" )
    elif k.startswith("decoder" ):
        k = k.replace("norm1" , "self_attn_layer_norm" )
        k = k.replace("norm2" , "encoder_attn_layer_norm" )
        k = k.replace("norm3" , "final_layer_norm" )
    return k
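# --- illustrative example (added) ---
# A ParlAI encoder attention key is mapped onto the HF naming scheme, e.g.:
#   rename_state_dict_key("encoder.layers.0.attention.q_lin.weight")
#   -> "encoder.layers.0.self_attn.q_proj.weight"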
def rename_layernorm_keys(sd ):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k )
        new_k = k.replace("layernorm_embedding" , "layer_norm" )
        assert new_k not in sd
        sd[new_k] = v
UpperCamelCase__ =['START']
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_json_path ):
    model = torch.load(checkpoint_path , map_location="cpu" )
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path )
    m = BlenderbotForConditionalGeneration(cfg )
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue
        new_k = rename_state_dict_key(k )
        if new_k not in valid_keys:
            failures.append([k, new_k] )
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd )
    m.model.load_state_dict(mapping , strict=True )
    m.half()
    m.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
# Required parameters
parser.add_argument('--src_path', type=str, help='like blenderbot-model.bin')
parser.add_argument('--save_dir', default='hf_blenderbot', type=str, help='Where to save converted model.')
parser.add_argument(
'--hf_config_json', default='blenderbot-3b-config.json', type=str, help='Path to config to use'
)
UpperCamelCase__ =parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
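# --- illustrative invocation (added; paths are placeholders) ---
# python convert_blenderbot_checkpoint.py \
#     --src_path ./blenderbot-model.bin \
#     --save_dir ./hf_blenderbot \
#     --hf_config_json ./blenderbot-3b-config.json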
| 356
|
from __future__ import annotations
import math
def minimax(depth , node_index , is_max , scores , height ):
    if depth < 0:
        raise ValueError("Depth cannot be less than 0" )
    if len(scores ) == 0:
        raise ValueError("Scores cannot be empty" )
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1 , node_index * 2 , False , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , False , scores , height ) , )
    return min(
        minimax(depth + 1 , node_index * 2 , True , scores , height ) , minimax(depth + 1 , node_index * 2 + 1 , True , scores , height ) , )
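# --- worked example (added for illustration) ---
# For scores = [3, 5, 2, 9] (height = 2) with the maximizer moving first:
#   depth 2 (leaves):     3   5   2   9
#   depth 1 (minimizer):  min(3, 5) = 3     min(2, 9) = 2
#   depth 0 (maximizer):  max(3, 2) = 3
# so minimax(0, 0, True, [3, 5, 2, 9], 2) returns 3.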
def main():
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores ) , 2 )
    print("Optimal value : " , end="" )
    print(minimax(0 , 0 , True , scores , height ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 325
| 0
|
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowerCAmelCase__ : Optional[Any] = logging.getLogger(__name__)
def UpperCamelCase__ ( model , bnb_quantization_config , weights_location=None , device_map=None , no_split_module_classes=None , max_memory=None , offload_folder=None , offload_state_dict=False , ) -> Optional[Any]:
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.' )
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'It is not recommended to quantize a loaded model. '
'The model should be instantiated under the `init_empty_weights` context manager.' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('.weight' , '' ).replace('.bias' , '' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
'We move the model to cuda.' )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['cpu', 'disk'] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Union[str, Any]:
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.' )
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.' )
    if isinstance(device_map , str ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
'\'sequential\'.' )
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == 'balanced_low_0') , max_memory=max_memory , **kwargs , )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )
    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
raise ValueError(
'\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ' )
else:
                    logger.info(
                        'Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit' )
del device_map_without_some_modules
return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> Optional[int]:
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model , has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
logger.warning(
'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
' Please double check your model architecture, or submit an issue on github if you think this is'
' a bug.' )
return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Tuple:
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _ , _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> Union[str, Any]:
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model , 'base_model_prefix' ):
        is_base_model = not hasattr(model , model.base_model_prefix )
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '' )
        filtered_module_names.append(name )
    return filtered_module_names
def UpperCamelCase__ ( model ) -> str:
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False
def get_parameter_device( parameter ) -> Optional[Any]:
    return next(parameter.parameters() ).device
def UpperCamelCase__ ( model , param , param_name , dtype , offload_folder , offload_index , fpaa_statistics ) -> Optional[int]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=dtype , value=param )
    tensor_name = param_name
    module = model
    if "." in tensor_name:
        splits = tensor_name.split('.' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(F"""{module} has no attribute {split}.""" )
            module = new_module
        tensor_name = splits[-1]
    # offload weights
    module._parameters[tensor_name].requires_grad = False
    offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
if hasattr(module._parameters[tensor_name] , 'SCB' ):
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fpaa_statistics , param_name.replace('weight' , 'SCB' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , param_name , 'meta' , dtype=dtype , value=torch.empty(*param.size() ) )
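# --- usage sketch (added; follows the public `accelerate` API, with the model
# class and weights path as placeholders) ---
#
#     from accelerate import init_empty_weights
#     from accelerate.utils import BnbQuantizationConfig
#
#     bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#     with init_empty_weights():
#         empty_model = MyModel(config)  # hypothetical model class
#     quantized = load_and_quantize_model(  # the (obfuscated) first helper above
#         empty_model, bnb_config, weights_location="/path/to/weights", device_map="auto"
#     )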
| 143
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCAmelCase__ : List[str] = {
'''configuration_speecht5''': [
'''SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP''',
'''SpeechT5Config''',
'''SpeechT5HifiGanConfig''',
],
'''feature_extraction_speecht5''': ['''SpeechT5FeatureExtractor'''],
'''processing_speecht5''': ['''SpeechT5Processor'''],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = ['''SpeechT5Tokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : str = [
'''SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SpeechT5ForSpeechToText''',
'''SpeechT5ForSpeechToSpeech''',
'''SpeechT5ForTextToSpeech''',
'''SpeechT5Model''',
'''SpeechT5PreTrainedModel''',
'''SpeechT5HifiGan''',
]
if TYPE_CHECKING:
    from .configuration_speecht5 import (
        SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
        SpeechT5Config,
        SpeechT5HifiGanConfig,
    )
    from .feature_extraction_speecht5 import SpeechT5FeatureExtractor
    from .processing_speecht5 import SpeechT5Processor
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speecht5 import SpeechT5Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speecht5 import (
            SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            SpeechT5ForSpeechToSpeech,
            SpeechT5ForSpeechToText,
            SpeechT5ForTextToSpeech,
            SpeechT5HifiGan,
            SpeechT5Model,
            SpeechT5PreTrainedModel,
        )
else:
import sys
lowerCAmelCase__ : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
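# --- illustrative note (added) ---
# With the `_LazyModule` indirection above, the heavy submodules are imported
# only on first attribute access, e.g.:
#
#     from transformers.models.speecht5 import SpeechT5Processor  # resolved lazily
#     processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")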
| 143
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Any = logging.get_logger(__name__)
_a : str = {
'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __A ( SCREAMING_SNAKE_CASE_ ):
    model_type = "wavlm"
    def __init__( self , vocab_size=32 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.0_2 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(512, 512, 512, 512, 512, 512, 512) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(10, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=128 , num_conv_pos_embedding_groups=16 , num_buckets=320 , max_bucket_distance=800 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , num_codevectors_per_group=320 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=100 , codevector_dim=256 , proj_codevector_dim=256 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=256 , tdnn_dim=(512, 512, 512, 512, 1500) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=512 , num_ctc_classes=80 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim )
        self.conv_stride = list(conv_stride )
        self.conv_kernel = list(conv_kernel )
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim )
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
                """ `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
                F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
                F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim )
        self.tdnn_kernel = list(tdnn_kernel )
        self.tdnn_dilation = list(tdnn_dilation )
        self.xvector_output_dim = xvector_output_dim
@property
def __A ( self ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
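# --- illustrative note (added) ---
# The property above multiplies the feature-extractor strides, so with the
# default conv_stride=(5, 2, 2, 2, 2, 2, 2) it evaluates to 5 * 2**6 = 320:
# one encoder frame per 320 input samples, i.e. 20 ms of audio at 16 kHz.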
| 353
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __A ( SCREAMING_SNAKE_CASE_ ):
    def __init__( self , df , split = None , features = None , streaming = True , cache_dir = None , keep_in_memory = False , working_dir = None , load_from_cache_file = True , file_format = "arrow" , **kwargs , ):
        super().__init__(
            split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , **kwargs , )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df , features=features , cache_dir=cache_dir , working_dir=working_dir , **kwargs , )
    def read( self ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
_lowerCAmelCase : Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=a__ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
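# --- usage sketch (added; `__A` is the obfuscated reader class above, known
# upstream as `SparkDatasetReader`) ---
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.createDataFrame([("hello",), ("world",)], ["text"])
#     ds = __A(df, cache_dir="/tmp/spark_cache").read()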
| 126
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys( flax_key_tuple , flax_tensor ) -> Union[str, Any]:
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor , (0, 2, 1) )
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple ):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
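# --- illustrative example (added) ---
# A 3-D "kernel" is an expert layer and gets permuted, while a 2-D "kernel"
# is a plain linear layer and is transposed, e.g.:
#   rename_base_flax_keys(("mlp", "wo", "kernel"), w)  # w.ndim == 2
#   -> (("mlp", "wo", "weight"), w.T)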
def get_key_and_tensorstore_dict( layer , checkpoint_info , switch_checkpoint_path ) -> Union[str, Any]:
    if "metadata" in layer:
        split_layer = layer.split("metadata" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/" ) )]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore" )
        curr_real_layer_name = "".join(split_layer[0] )[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
    else:
        split_layer = layer.split("/" )
        curr_real_layer_name = "/".join(split_layer[:-1] )
        split_layer = (split_layer[-1],)
    if "kvstore/path" in layer:
        content = F"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        # checkpoint keys were joined with "/"; PyTorch state dicts use "."
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name=WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")
    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path)
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}
    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)
        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)
        # If adding this weight would tip the current shard over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0
        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size
    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"""-{len(sharded_state_dicts)+1:05d}-of-???.bin"""))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())
    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None
    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"""-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin""")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"""-{idx+1:05d}-of-???.bin"""))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)
    return metadata, index
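# The index built above follows the standard sharded-checkpoint layout; an
# illustrative instance (all values hypothetical) looks like:
#   {"metadata": {"total_size": 123456789},
#    "weight_map": {"shared.embedding": "pytorch_model-00001-of-00002.bin", ...}}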
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto")
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 52
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( ProcessorMixin ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """simple docstring"""
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
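        # A minimal usage sketch (hypothetical image path; `processor` is an assumed
        # instance of the class above, and apply_ocr=True additionally requires
        # pytesseract to be installed):
        #
        #   from PIL import Image
        #   image = Image.open("document.png").convert("RGB")
        #   encoding = processor(image, return_tensors="pt")
        #   # -> BatchEncoding with input_ids, bbox, attention_mask and image tensors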
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
| 67
| 0
|
'''simple docstring'''
def _lowerCamelCase ( list_data : list , length : int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True
    return list_data if not swapped else _lowerCamelCase(list_data , length - 1 )
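# Illustrative: _lowerCamelCase([4, 1, 3, 2]) returns [1, 2, 3, 4]; each recursive
# pass bubbles the largest remaining element toward the end, and the swapped flag
# short-circuits the recursion once a full pass makes no exchanges.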
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
'''simple docstring'''
from manim import *
class __SCREAMING_SNAKE_CASE (Scene ):
"""simple docstring"""
def UpperCamelCase__ ( self : Dict ):
_a = Rectangle(height=0.5 , width=0.5 )
_a = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
_a = [mem.copy() for i in range(6 )]
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = VGroup(__a , __a ).arrange(__a , buff=0 )
_a = Text("CPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_a = [mem.copy() for i in range(4 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("GPU" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Model" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_a = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
_a = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
cpu_targs.append(__a )
_a = [mem.copy() for i in range(6 )]
_a = VGroup(*__a ).arrange(__a , buff=0 )
_a = Text("Loaded Checkpoint" , font_size=24 )
_a = Group(__a , __a ).arrange(__a , aligned_edge=__a , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
_a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_a = MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_a = MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
_a = MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__a ) , Write(__a ) )
self.play(Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_a = []
_a = []
for i, rect in enumerate(__a ):
_a = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
first_animations.append(GrowFromCenter(__a , run_time=1 ) )
_a = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(*__a )
self.wait()
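# To render this scene (a sketch; assumes manim community edition is installed and
# the file is saved as checkpoint_loading.py -- a hypothetical filename):
#   manim -pql checkpoint_loading.py __SCREAMING_SNAKE_CASE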
| 346
| 0
|
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel ( ModelMixin , ConfigMixin ):
    '''simple docstring'''
    @register_to_config
    def __init__( self , *,
        clip_extra_context_tokens : int = 4 , clip_embeddings_dim : int = 768 , time_embed_dim : int , cross_attention_dim , ):
        '''simple docstring'''
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim , time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim , time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim , self.clip_extra_context_tokens * cross_attention_dim)
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim , cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)
    def forward( self , *, image_embeddings , prompt_embeds , text_encoder_hidden_states , do_classifier_free_guidance):
        '''simple docstring'''
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size , -1)
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size , -1 , self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0 , 2 , 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
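# Shape sketch for the forward pass above (batch size B assumed for illustration):
# image embeddings of shape (B, clip_embeddings_dim) are projected to
# (B, clip_extra_context_tokens * cross_attention_dim), reshaped to
# (B, cross_attention_dim, clip_extra_context_tokens), then permuted to
# (B, clip_extra_context_tokens, cross_attention_dim) so the extra tokens can be
# concatenated ahead of the projected text encoder states along dim=1.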
| 166
|
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece_bpe_char.model""")
@require_sentencepiece
@require_tokenizers
class _UpperCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SpeechTaTokenizer
lowerCAmelCase__ = False
lowerCAmelCase__ = True
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
__lowercase =SpeechTaTokenizer(_lowerCAmelCase)
__lowercase =AddedToken('<mask>' , lstrip=_lowerCAmelCase , rstrip=_lowerCAmelCase)
__lowercase =mask_token
tokenizer.add_special_tokens({'mask_token': mask_token})
tokenizer.add_tokens(['<ctc_blank>'])
tokenizer.save_pretrained(self.tmpdirname)
def __lowerCamelCase ( self : List[Any] , _lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase ='this is a test'
__lowercase ='this is a test'
return input_text, output_text
def __lowerCamelCase ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : List[Any]=False , _lowerCAmelCase : Dict=2_0 , _lowerCAmelCase : Tuple=5):
'''simple docstring'''
__lowercase , __lowercase =self.get_input_output_texts(_lowerCAmelCase)
__lowercase =tokenizer.encode(_lowerCAmelCase , add_special_tokens=_lowerCAmelCase)
__lowercase =tokenizer.decode(_lowerCAmelCase , clean_up_tokenization_spaces=_lowerCAmelCase)
return text, ids
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase ='<pad>'
__lowercase =1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowerCAmelCase) , _lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowerCAmelCase) , _lowerCAmelCase)
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
__lowercase =list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-4] , 'œ')
self.assertEqual(vocab_keys[-2] , '<mask>')
self.assertEqual(vocab_keys[-1] , '<ctc_blank>')
self.assertEqual(len(_lowerCAmelCase) , 8_1)
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 7_9)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.get_tokenizers(do_lower_case=_lowerCAmelCase)
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}"""):
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowercase =['aaaaa bbbbbb', 'cccccccccdddddddd']
__lowercase =tokenizer.add_tokens(_lowerCAmelCase)
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase))
self.assertEqual(_lowerCAmelCase , all_size + len(_lowerCAmelCase))
__lowercase =tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' , add_special_tokens=_lowerCAmelCase)
self.assertGreaterEqual(len(_lowerCAmelCase) , 4)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
__lowercase ={'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__lowercase =tokenizer.add_special_tokens(_lowerCAmelCase)
__lowercase =tokenizer.vocab_size
__lowercase =len(_lowerCAmelCase)
self.assertNotEqual(_lowerCAmelCase , 0)
self.assertEqual(_lowerCAmelCase , _lowerCAmelCase)
self.assertEqual(_lowerCAmelCase , len(_lowerCAmelCase))
self.assertEqual(_lowerCAmelCase , all_size_a + len(_lowerCAmelCase))
__lowercase =tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' , add_special_tokens=_lowerCAmelCase)
self.assertGreaterEqual(len(_lowerCAmelCase) , 6)
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] , tokens[1])
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] , tokens[-4])
self.assertEqual(tokens[0] , tokenizer.eos_token_id)
self.assertEqual(tokens[-3] , tokenizer.pad_token_id)
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
pass
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
__lowercase =self.get_tokenizer()
__lowercase =tokenizer.tokenize('This is a test')
# fmt: off
self.assertListEqual(_lowerCAmelCase , [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_lowerCAmelCase) , [4, 3_2, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 7, 4, 6, 5, 1_2, 6] , )
__lowercase =tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
__lowercase =tokenizer.convert_tokens_to_ids(_lowerCAmelCase)
# fmt: off
self.assertListEqual(_lowerCAmelCase , [4, 3_0, 4, 2_0, 7, 1_2, 4, 2_5, 8, 1_3, 9, 4, 1_0, 9, 4, 3, 2_3, 4, 7, 9, 1_4, 4, 6, 1_1, 1_0, 1_2, 4, 1_0, 1_2, 4, 1_9, 7, 1_5, 1_2, 7_3, 2_6])
# fmt: on
__lowercase =tokenizer.convert_ids_to_tokens(_lowerCAmelCase)
self.assertListEqual(
_lowerCAmelCase , [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
@slow
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =[
'Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides '
'general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural '
'Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained '
'models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.',
'BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly '
'conditioning on both left and right context in all layers.',
'The quick brown fox jumps over the lazy dog.',
]
# fmt: off
__lowercase ={
'input_ids': [
[4, 3_2, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 6_4, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_5, 2_2, 4, 2_8, 9, 8, 2_0, 9, 4, 7, 1_2, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 6, 1_3, 7, 9, 1_2, 1_9, 8, 1_3, 1_8, 5, 1_3, 1_2, 4, 7, 9, 1_4, 4, 2_4, 2_2, 6, 8, 1_3, 1_7, 1_1, 3_9, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 3_9, 2_5, 5, 1_3, 6, 6_3, 4, 2_4, 1_3, 8, 2_7, 1_0, 1_4, 5, 1_2, 4, 2_1, 5, 9, 5, 1_3, 7, 1_5, 3_9, 2_4, 1_6, 1_3, 2_4, 8, 1_2, 5, 4, 7, 1_3, 1_7, 1_1, 1_0, 6, 5, 1_7, 6, 1_6, 1_3, 5, 1_2, 4, 6_4, 4_0, 4_7, 5_4, 3_2, 2_3, 4, 5_3, 4_9, 3_2, 2_3, 4, 5_4, 8, 4_0, 4_7, 5_4, 3_2, 7, 2_3, 4, 6_9, 5_2, 4_3, 2_3, 4, 5_1, 1_0, 1_2, 6, 1_0, 1_5, 4_0, 5, 1_3, 6, 2_3, 4, 6_9, 5_2, 4_8, 5, 6, 2_6, 2_6, 2_6, 6_3, 4, 1_9, 8, 1_3, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 6_1, 9, 1_4, 5, 1_3, 1_2, 6, 7, 9, 1_4, 1_0, 9, 2_1, 4, 6_4, 4_8, 5_2, 6_1, 6_3, 4, 7, 9, 1_4, 4, 4_8, 7, 6, 1_6, 1_3, 7, 1_5, 4, 5_2, 7, 9, 2_1, 1_6, 7, 2_1, 5, 4, 5_3, 5, 9, 5, 1_3, 7, 6, 1_0, 8, 9, 4, 6_4, 4_8, 5_2, 5_3, 6_3, 4, 2_0, 1_0, 6, 1_1, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 1_0, 1_3, 6, 2_2, 3_9, 6, 2_0, 8, 4, 2_4, 1_3, 5, 6, 1_3, 7, 1_0, 9, 5, 1_4, 4, 1_8, 8, 1_4, 5, 1_5, 1_2, 4, 1_0, 9, 4, 8, 9, 5, 4, 1_1, 1_6, 9, 1_4, 1_3, 5, 1_4, 4, 2_4, 1_5, 1_6, 1_2, 4, 1_5, 7, 9, 2_1, 1_6, 7, 2_1, 5, 1_2, 4, 7, 9, 1_4, 4, 1_4, 5, 5, 2_4, 4, 1_0, 9, 6, 5, 1_3, 8, 2_4, 5, 1_3, 7, 2_5, 1_0, 1_5, 1_0, 6, 2_2, 4, 2_5, 5, 6, 2_0, 5, 5, 9, 4, 5_8, 7, 3_7, 2_3, 4, 4_9, 2_2, 3_2, 8, 1_3, 1_7, 1_1, 4, 7, 9, 1_4, 4, 3_2, 5, 9, 1_2, 8, 1_3, 5_5, 1_5, 8, 2_0, 2_6, 2],
[4, 4_0, 4_7, 5_4, 3_2, 4, 1_0, 1_2, 4, 1_4, 5, 1_2, 1_0, 2_1, 9, 5, 1_4, 4, 6, 8, 4, 2_4, 1_3, 5, 3_9, 6, 1_3, 7, 1_0, 9, 4, 1_4, 5, 5, 2_4, 4, 2_5, 1_0, 1_4, 1_0, 1_3, 5, 1_7, 6, 1_0, 8, 9, 7, 1_5, 4, 1_3, 5, 2_4, 1_3, 5, 1_2, 5, 9, 6, 7, 6, 1_0, 8, 9, 1_2, 4, 1_9, 1_3, 8, 1_8, 4, 1_6, 9, 1_5, 7, 2_5, 5, 1_5, 5, 1_4, 4, 6, 5, 3_7, 6, 4, 2_5, 2_2, 4, 4_6, 8, 1_0, 9, 6, 1_5, 2_2, 4, 1_7, 8, 9, 1_4, 1_0, 6, 1_0, 8, 9, 1_0, 9, 2_1, 4, 8, 9, 4, 2_5, 8, 6, 1_1, 4, 1_5, 5, 1_9, 6, 4, 7, 9, 1_4, 4, 1_3, 1_0, 2_1, 1_1, 6, 4, 1_7, 8, 9, 6, 5, 3_7, 6, 4, 1_0, 9, 4, 7, 1_5, 1_5, 4, 1_5, 7, 2_2, 5, 1_3, 1_2, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 3_2, 1_1, 5, 4, 4_5, 1_6, 1_0, 1_7, 2_8, 4, 2_5, 1_3, 8, 2_0, 9, 4, 1_9, 8, 3_7, 4, 4_6, 1_6, 1_8, 2_4, 1_2, 4, 8, 2_7, 5, 1_3, 4, 6, 1_1, 5, 4, 1_5, 7, 5_7, 2_2, 4, 1_4, 8, 2_1, 2_6, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowerCAmelCase , model_name='microsoft/speecht5_asr' , revision='c5ef64c71905caeccde0e4462ef3f9077224c524' , sequences=_lowerCAmelCase , )
| 166
| 1
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
a : List[Any] = None
a : str = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
a : Optional[int] = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
a : Tuple = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
a : List[str] = '▁'
class _a ( PreTrainedTokenizerFast ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = BigBirdTokenizer
A = ['input_ids', 'attention_mask']
A = []
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_="<unk>", SCREAMING_SNAKE_CASE_="<s>", SCREAMING_SNAKE_CASE_="</s>", SCREAMING_SNAKE_CASE_="<pad>", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_="[CLS]", **SCREAMING_SNAKE_CASE_, ) -> Union[str, Any]:
UpperCAmelCase_: Dict = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else bos_token
UpperCAmelCase_: Dict = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else eos_token
UpperCAmelCase_: Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else unk_token
UpperCAmelCase_: Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else pad_token
UpperCAmelCase_: Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else cls_token
UpperCAmelCase_: Any = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase_: Any = AddedToken(_SCREAMING_SNAKE_CASE, lstrip=_SCREAMING_SNAKE_CASE, rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE, tokenizer_file=_SCREAMING_SNAKE_CASE, bos_token=_SCREAMING_SNAKE_CASE, eos_token=_SCREAMING_SNAKE_CASE, unk_token=_SCREAMING_SNAKE_CASE, sep_token=_SCREAMING_SNAKE_CASE, pad_token=_SCREAMING_SNAKE_CASE, cls_token=_SCREAMING_SNAKE_CASE, mask_token=_SCREAMING_SNAKE_CASE, **_SCREAMING_SNAKE_CASE, )
UpperCAmelCase_: List[Any] = vocab_file
UpperCAmelCase_: List[Any] = False if not self.vocab_file else True
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: str = [self.sep_token_id]
UpperCAmelCase_: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
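    # Illustrative (hypothetical ids): with cls_token_id=65 and sep_token_id=66, a
    # single sequence [10, 11] becomes [65, 10, 11, 66]; a pair appends the second
    # sequence plus another separator: [65, 10, 11, 66, 20, 21, 66].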
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: Union[str, Any] = [self.sep_token_id]
UpperCAmelCase_: List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCAmelCase_: Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file, _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 368
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a : Optional[Any] = logging.get_logger(__name__)
a : Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a : Dict = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
a : str = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
a : Optional[int] = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class _a ( PreTrainedTokenizerFast ):
A = VOCAB_FILES_NAMES
A = PRETRAINED_VOCAB_FILES_MAP
A = PRETRAINED_INIT_CONFIGURATION
A = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A = SqueezeBertTokenizer
def __init__(self, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="[UNK]", SCREAMING_SNAKE_CASE_="[SEP]", SCREAMING_SNAKE_CASE_="[PAD]", SCREAMING_SNAKE_CASE_="[CLS]", SCREAMING_SNAKE_CASE_="[MASK]", SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> int:
super().__init__(
SCREAMING_SNAKE_CASE_, tokenizer_file=SCREAMING_SNAKE_CASE_, do_lower_case=SCREAMING_SNAKE_CASE_, unk_token=SCREAMING_SNAKE_CASE_, sep_token=SCREAMING_SNAKE_CASE_, pad_token=SCREAMING_SNAKE_CASE_, cls_token=SCREAMING_SNAKE_CASE_, mask_token=SCREAMING_SNAKE_CASE_, tokenize_chinese_chars=SCREAMING_SNAKE_CASE_, strip_accents=SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_, )
UpperCAmelCase_: str = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""", SCREAMING_SNAKE_CASE_ ) != do_lower_case
or normalizer_state.get("""strip_accents""", SCREAMING_SNAKE_CASE_ ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""", SCREAMING_SNAKE_CASE_ ) != tokenize_chinese_chars
):
UpperCAmelCase_: Optional[Any] = getattr(SCREAMING_SNAKE_CASE_, normalizer_state.pop("""type""" ) )
UpperCAmelCase_: Optional[Any] = do_lower_case
UpperCAmelCase_: int = strip_accents
UpperCAmelCase_: int = tokenize_chinese_chars
UpperCAmelCase_: List[Any] = normalizer_class(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = do_lower_case
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=None ) -> Optional[Any]:
UpperCAmelCase_: Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> List[int]:
UpperCAmelCase_: List[Any] = [self.sep_token_id]
UpperCAmelCase_: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
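    # Illustrative: for sequences A and B of lengths 2 and 1, the method above
    # assigns segment id 0 to [CLS] A [SEP] and segment id 1 to B [SEP], i.e.
    # [0, 0, 0, 0, 1, 1].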
def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None ) -> Tuple[str]:
UpperCAmelCase_: Dict = self._tokenizer.model.save(SCREAMING_SNAKE_CASE_, name=SCREAMING_SNAKE_CASE_ )
return tuple(SCREAMING_SNAKE_CASE_ )
| 82
| 0
|
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
_a = logging.get_logger(__name__)
_a = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
_a = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_a = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict[int, str]:
    '''simple docstring'''
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
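# Illustrative: get_pairs(("l", "o", "w", "e", "r")) yields the symbol bigrams
# {("l", "o"), ("o", "w"), ("w", "e"), ("e", "r")}; the bpe method below then
# repeatedly merges the lowest-ranked pair until no ranked pair remains.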
class _lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[Any] = ["input_ids", "attention_mask"]
def __init__( self : Optional[Any], UpperCAmelCase__ : List[str], UpperCAmelCase__ : List[Any], UpperCAmelCase__ : Union[str, Any]="replace", UpperCAmelCase__ : Tuple="<s>", UpperCAmelCase__ : Optional[Any]="</s>", UpperCAmelCase__ : Any="</s>", UpperCAmelCase__ : Optional[int]="<s>", UpperCAmelCase__ : Tuple="<unk>", UpperCAmelCase__ : Dict="<pad>", UpperCAmelCase__ : int="<mask>", UpperCAmelCase__ : Any=False, **UpperCAmelCase__ : List[Any], ):
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else bos_token
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else eos_token
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else sep_token
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else cls_token
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else unk_token
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(UpperCAmelCase__, lstrip=UpperCAmelCase__, rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__, UpperCAmelCase__ ) else mask_token
super().__init__(
errors=UpperCAmelCase__, bos_token=UpperCAmelCase__, eos_token=UpperCAmelCase__, unk_token=UpperCAmelCase__, sep_token=UpperCAmelCase__, cls_token=UpperCAmelCase__, pad_token=UpperCAmelCase__, mask_token=UpperCAmelCase__, add_prefix_space=UpperCAmelCase__, **UpperCAmelCase__, )
with open(UpperCAmelCase__, encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(UpperCAmelCase__ )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase__, encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(UpperCAmelCase__, range(len(UpperCAmelCase__ ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[str] ):
return len(self.encoder )
def _lowercase ( self : int ):
return dict(self.encoder, **self.added_tokens_encoder )
def _lowercase ( self : str, UpperCAmelCase__ : str ):
if token in self.cache:
return self.cache[token]
__lowercase = tuple(UpperCAmelCase__ )
__lowercase = get_pairs(UpperCAmelCase__ )
if not pairs:
return token
while True:
__lowercase = min(UpperCAmelCase__, key=lambda UpperCAmelCase__ : self.bpe_ranks.get(UpperCAmelCase__, float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase ,__lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(UpperCAmelCase__ ):
try:
__lowercase = word.index(UpperCAmelCase__, UpperCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(UpperCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(UpperCAmelCase__ )
__lowercase = new_word
if len(UpperCAmelCase__ ) == 1:
break
else:
__lowercase = get_pairs(UpperCAmelCase__ )
__lowercase = " ".join(UpperCAmelCase__ )
__lowercase = word
return word
def _lowercase ( self : Tuple, UpperCAmelCase__ : Tuple ):
__lowercase = []
for token in re.findall(self.pat, UpperCAmelCase__ ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase__ ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Optional[int], UpperCAmelCase__ : int ):
return self.encoder.get(UpperCAmelCase__, self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[Any], UpperCAmelCase__ : str ):
return self.decoder.get(UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Any ):
__lowercase = "".join(UpperCAmelCase__ )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
return text
def _lowercase ( self : str, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[str] = None ):
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowercase = os.path.join(
UpperCAmelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
UpperCAmelCase__, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(UpperCAmelCase__, "w", encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder, indent=2, sort_keys=UpperCAmelCase__, ensure_ascii=UpperCAmelCase__ ) + "\n" )
__lowercase = 0
with open(UpperCAmelCase__, "w", encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda UpperCAmelCase__ : kv[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(UpperCAmelCase__ ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowercase = [self.cls_token_id]
__lowercase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Any, UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None, UpperCAmelCase__ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__, token_ids_a=UpperCAmelCase__, already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1, 1] + ([0] * len(UpperCAmelCase__ )) + [1]
def _lowercase ( self : List[Any], UpperCAmelCase__ : List[int], UpperCAmelCase__ : Optional[List[int]] = None ):
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Dict, UpperCAmelCase__ : List[Any], UpperCAmelCase__ : str=False, **UpperCAmelCase__ : List[Any] ):
__lowercase = kwargs.pop("add_prefix_space", self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase__ ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def _lowercase ( self : Dict, UpperCAmelCase__ : Union[Dict[str, EncodedInput], BatchEncoding], UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD, UpperCAmelCase__ : Optional[int] = None, UpperCAmelCase__ : Optional[bool] = None, ):
__lowercase = super()._pad(
encoded_inputs=UpperCAmelCase__, max_length=UpperCAmelCase__, padding_strategy=UpperCAmelCase__, pad_to_multiple_of=UpperCAmelCase__, return_attention_mask=UpperCAmelCase__, )
# Load from model defaults
if return_attention_mask is None:
__lowercase = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowercase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowercase = len(encoded_inputs["global_attention_mask"] ) != len(UpperCAmelCase__ )
if needs_to_be_padded:
__lowercase = len(UpperCAmelCase__ ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowercase = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
__lowercase = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
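        # Illustrative: right-padding a global_attention_mask of [1, 0, 0] to length
        # 5 yields [1, 0, 0, -1, -1]; -1 (rather than 0) marks padded positions
        # because 0 already means "local attention" for LED.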
| 17
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def UpperCAmelCase__ ( self ) -> Any:
"""simple docstring"""
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def UpperCAmelCase__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
_UpperCAmelCase = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
_UpperCAmelCase = model(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify that a ValueError is raised for an unsupported stage name
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass
    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_retain_grad_hidden_states_attentions(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
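        # Worked example of the padding arithmetic above with image_size=32 and
        # patch_size=3 (the values set in this test): 32 % 3 == 2, so
        # padded = 32 + 3 - 2 = 33, the next multiple of 3, and the hidden
        # states are checked against a (33 // 3) x (33 // 3) patch grid.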
    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            # NaN is the only value that compares unequal to itself, so this
            # zeroes out exactly the NaN positions.
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
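    # Note on `set_nan_tensor_to_zero` above: `torch.allclose` returns False as
    # soon as either tensor contains NaN (its `equal_nan` flag defaults to
    # False), so both sides are NaN-zeroed before comparing; `t != t` is True
    # exactly at NaN positions.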
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertEqual(len(outputs.feature_maps), len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertEqual(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertEqual(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertEqual((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
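
# A minimal usage sketch of the backbone exercised above (illustrative only,
# mirroring the tester defaults; not part of the test suite):
#
#   import torch
#   from transformers import MaskFormerSwinConfig
#   from transformers.models.maskformer.modeling_maskformer_swin import MaskFormerSwinBackbone
#
#   config = MaskFormerSwinConfig(
#       image_size=32, patch_size=2, num_channels=3, embed_dim=16,
#       depths=[1, 2, 1], num_heads=[2, 2, 4],
#       out_features=["stage1", "stage2", "stage3"],
#   )
#   backbone = MaskFormerSwinBackbone(config)
#   with torch.no_grad():
#       feature_maps = backbone(torch.randn(1, 3, 32, 32)).feature_maps
#   # one feature map per requested stage; channels double stage over stage: 16, 32, 64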
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
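
# For reference: `shift_tokens_right` builds decoder inputs from labels by
# moving every token one position to the right and placing the decoder start
# token (here the target language code, e.g. RO_CODE for ron_Latn) in front:
#   labels            = [tok_1, tok_2, ..., </s>]
#   decoder_input_ids = [lang_code, tok_1, tok_2, ...]
# (a sketch of the behaviour relied on below, not the implementation itself)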
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )
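        # Note: `fairseq_offset` shifts raw SentencePiece ids to make room for
        # the fairseq-style special tokens (<s>, <pad>, </s>, <unk>) at the
        # bottom of the vocabulary, which is why every expected id above is
        # offset rather than compared directly.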
    def test_save_pretrained(self):
        self.tokenizers_list = [(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})]
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    ' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def a_ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)
    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # decoder starts with the target language code
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        # legacy layout: tokens, </s>, then the source language code
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        # new default layout: source language code first, then tokens, </s>
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
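
# A quick sketch of the two layouts pinned down above (illustrative only):
#   legacy_behaviour=True  ->  [tok_1, ..., tok_n, </s>, eng_Latn]
#   legacy_behaviour=False ->  [eng_Latn, tok_1, ..., tok_n, </s>]
# i.e. the source language code moves from suffix to prefix of the encoded input.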
"""simple docstring"""
def manhattan_distance(point_a: list, point_b: list) -> float:
    """
    Expects two lists of numbers and returns the Manhattan distance between them.

    >>> manhattan_distance([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    """Raises TypeError or ValueError if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """
    One-liner variant of `manhattan_distance`.

    >>> manhattan_distance_one_liner([1, 1], [2, 2])
    2.0
    """
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()