code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
def a__ ( UpperCAmelCase : str ) -> str:
    """Return *UpperCAmelCase* with its whitespace-separated words in reverse order.

    Fix: the original body referenced an undefined name ``input_str``
    instead of the function's parameter, so every call raised NameError.
    """
    # split() with no argument collapses runs of whitespace and drops
    # leading/trailing blanks before the order is reversed.
    return " ".join(UpperCAmelCase.split()[::-1])
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 336 |
from __future__ import annotations
def a__ ( UpperCAmelCase : list[list[int]] ) -> bool:
    """Solve the rat-in-a-maze problem for a square grid.

    ``UpperCAmelCase`` is an n x n grid where 0 marks an open cell and 1 a
    blocked cell.  Prints the solution path grid (1s on the path) when the
    bottom-right cell is reachable from the top-left, otherwise prints a
    failure message.  Returns True iff a path exists.

    Fixes: the original assigned every local to ``UpperCAmelCase`` and then
    read undefined names (``solutions``, ``solved``), called an undefined
    ``run_maze`` (the helper below was also named ``a__``, shadowing this
    entry point), and printed ``str(UpperCAmelCase)`` instead of each row.
    """
    size = len(UpperCAmelCase)
    # Solution grid: cells on the discovered path are marked with 1.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(UpperCAmelCase, 0, 0, solutions)
    if solved:
        print('\n'.join(str(row) for row in solutions))
    else:
        print('No solution exists!')
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first search from cell (i, j) toward (size-1, size-1).

    Marks visited path cells in ``solutions`` and un-marks them on
    backtracking.  Returns True when the target cell is reached.
    """
    size = len(maze)
    # Final check point: reached the bottom-right target cell.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # Check for already-visited and blocked cells.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # Mark the cell as part of the tentative path.
            solutions[i][j] = 1
            # Explore the four neighbours: down, right, up, left.
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            # Dead end: backtrack.
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 336 | 1 |
"""simple docstring"""
from __future__ import annotations
def _lowerCAmelCase ( UpperCAmelCase : list[float] , UpperCAmelCase : list[float] ):
'''simple docstring'''
UpperCamelCase__ : List[str] =sorted(numsa + numsa )
UpperCamelCase__ , UpperCamelCase__ : Optional[Any] =divmod(len(UpperCAmelCase ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): obfuscation bound both input arrays to the same name and
    # the print below references `array_a` (twice), which is never defined, as
    # is `median_of_two_arrays` — verify against the original script before use.
    _SCREAMING_SNAKE_CASE : List[str] = [float(x) for x in input("""Enter the elements of first array: """).split()]
    _SCREAMING_SNAKE_CASE : Dict = [float(x) for x in input("""Enter the elements of second array: """).split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_a)}''')
| 157 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): obfuscation bound both the module logger and the checkpoint map
# to the same name; code further down references `logger`, which is therefore
# never defined here.
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)

# Checkpoint name -> URL of its remote configuration file.
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class __a ( snake_case__ ):
    """Obfuscated copy of the Transformer-XL (`transfo-xl`) model configuration.

    NOTE(review): every ``__init__`` parameter below is named ``lowercase_``
    (duplicate argument names are a SyntaxError), and the body reads names
    such as ``proj_share_all_but_first``/``d_model`` that the obfuscation
    erased — this block cannot run as written; restore the original
    parameter names (vocab_size, cutoffs, d_model, ...) before use.
    """

    SCREAMING_SNAKE_CASE_ = 'transfo-xl'
    # Keys pruned from model outputs when serialising past state.
    SCREAMING_SNAKE_CASE_ = ['mems']
    # Canonical HF attribute names mapped onto this config's field names.
    SCREAMING_SNAKE_CASE_ = {
        'n_token': 'vocab_size',
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__( self : Any , lowercase_ : str=26_7735 , lowercase_ : Union[str, Any]=[2_0000, 4_0000, 20_0000] , lowercase_ : Union[str, Any]=1024 , lowercase_ : Tuple=1024 , lowercase_ : int=16 , lowercase_ : str=64 , lowercase_ : Union[str, Any]=4096 , lowercase_ : Dict=4 , lowercase_ : Dict=False , lowercase_ : Dict=18 , lowercase_ : Optional[Any]=1600 , lowercase_ : str=1000 , lowercase_ : List[Any]=True , lowercase_ : Tuple=True , lowercase_ : Any=0 , lowercase_ : Union[str, Any]=-1 , lowercase_ : List[str]=True , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : List[str]=True , lowercase_ : Optional[int]="normal" , lowercase_ : str=0.0_1 , lowercase_ : Any=0.0_1 , lowercase_ : Union[str, Any]=0.0_2 , lowercase_ : List[str]=1e-5 , lowercase_ : Optional[int]=0 , **lowercase_ : Union[str, Any] , ):
        # NOTE(review): each assignment targets the same obfuscated name, so
        # none of the intended attributes (d_model, n_head, ...) survive.
        UpperCamelCase__ : Union[str, Any] = vocab_size
        UpperCamelCase__ : Union[str, Any] = []
        self.cutoffs.extend(lowercase_ )
        if proj_share_all_but_first:
            UpperCamelCase__ : List[Any] = [False] + [True] * len(self.cutoffs )
        else:
            UpperCamelCase__ : Union[str, Any] = [False] + [False] * len(self.cutoffs )
        UpperCamelCase__ : Dict = d_model
        UpperCamelCase__ : Union[str, Any] = d_embed
        UpperCamelCase__ : Optional[Any] = d_head
        UpperCamelCase__ : str = d_inner
        UpperCamelCase__ : List[Any] = div_val
        UpperCamelCase__ : Any = pre_lnorm
        UpperCamelCase__ : List[Any] = n_layer
        UpperCamelCase__ : List[str] = n_head
        UpperCamelCase__ : Dict = mem_len
        UpperCamelCase__ : Optional[Any] = same_length
        UpperCamelCase__ : Optional[int] = attn_type
        UpperCamelCase__ : Any = clamp_len
        UpperCamelCase__ : str = sample_softmax
        UpperCamelCase__ : Optional[Any] = adaptive
        UpperCamelCase__ : Tuple = dropout
        UpperCamelCase__ : Any = dropatt
        UpperCamelCase__ : Tuple = untie_r
        UpperCamelCase__ : Optional[int] = init
        UpperCamelCase__ : Optional[int] = init_range
        UpperCamelCase__ : str = proj_init_std
        UpperCamelCase__ : Union[str, Any] = init_std
        UpperCamelCase__ : Optional[Any] = layer_norm_epsilon
        super().__init__(eos_token_id=lowercase_ , **lowercase_ )

    @property
    def _lowerCAmelCase ( self : str ):
        # Message copied from Transformer-XL documentation.
        # Transformer-XL has no fixed sequence-length limit, so -1 is returned.
        logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    @max_position_embeddings.setter
    def _lowerCAmelCase ( self : List[Any] , lowercase_ : Optional[Any] ):
        # Message copied from Transformer-XL documentation.
        # Setting a max length is meaningless for this architecture.
        raise NotImplementedError(
            f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 157 | 1 |
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , collections.abc.Iterable ):
return x
return (x, x)
@require_tf
class _lowerCamelCase :
    """Mixin exercising TFVisionTextDualEncoderModel with pluggable vision/text backbones.

    NOTE(review): this block is machine-obfuscated — the annotated tuple
    assignments (``__magic_name__ , __magic_name__ : Tuple = ...``) are not
    valid Python, several signatures repeat the parameter name ``_A``
    (SyntaxError), and repeated assignment targets discard all but the last
    value (e.g. ``out_a - out_a`` below is always zero).  Treat it as a
    reference skeleton, not runnable code.
    """

    def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Any ) -> Any:
        # Hook: subclasses build (vision_model, text_model) from configs.
        pass

    def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
        # Hook: subclasses return a pretrained model plus example inputs.
        pass

    def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
        # Hook: subclasses return config and input tensors.
        pass

    def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Tuple=None , **_A : str ) -> List[Any]:
        # Build a dual encoder from a combined config; check embedding shapes.
        __magic_name__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
        __magic_name__ : Union[str, Any] = TFVisionTextDualEncoderModel(_A )
        __magic_name__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )

    def __lowerCAmelCase ( self : Any , _A : Tuple , _A : Tuple , _A : str , _A : Optional[int] , _A : Any=None , **_A : int ) -> Any:
        # Build a dual encoder from two sub-models; check embedding shapes.
        __magic_name__ , __magic_name__ : Tuple = self.get_vision_text_model(_A , _A )
        __magic_name__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
        __magic_name__ : Dict = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def __lowerCAmelCase ( self : Dict , _A : Any , _A : Dict , _A : List[Any] , _A : List[str] , _A : Optional[Any]=None , **_A : List[Any] ) -> Any:
        # Build via from_vision_text_pretrained(**kwargs); check embedding shapes.
        __magic_name__ , __magic_name__ : List[str] = self.get_vision_text_model(_A , _A )
        __magic_name__ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
        __magic_name__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
        __magic_name__ : Optional[int] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )

    def __lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : Optional[int] , _A : int , _A : Dict , _A : Union[str, Any]=None , **_A : int ) -> Optional[Any]:
        # Round-trip save_pretrained / from_pretrained; outputs must match.
        __magic_name__ , __magic_name__ : Any = self.get_vision_text_model(_A , _A )
        __magic_name__ : int = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
        __magic_name__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
        __magic_name__ : str = output[0].numpy()
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_A )
            __magic_name__ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A )
            __magic_name__ : Tuple = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
            __magic_name__ : int = after_output[0].numpy()
            # NOTE(review): obfuscation collapsed both operands to `out_a`.
            __magic_name__ : Tuple = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_A , 1E-5 )

    def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Union[str, Any]=None , **_A : str ) -> Dict:
        # Check attention tensor shapes for both towers.
        __magic_name__ , __magic_name__ : Optional[int] = self.get_vision_text_model(_A , _A )
        __magic_name__ : Any = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
        __magic_name__ : Any = model(
            input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
        __magic_name__ : List[str] = output.vision_model_output.attentions
        self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __magic_name__ : Tuple = to_atuple(vision_model.config.image_size )
        __magic_name__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
        __magic_name__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __magic_name__ : List[str] = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __magic_name__ : Any = output.text_model_output.attentions
        self.assertEqual(len(_A ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def __lowerCAmelCase ( self : Dict , _A : np.ndarray , _A : np.ndarray , _A : float ) -> Union[str, Any]:
        # Assert max absolute elementwise difference is within tolerance.
        __magic_name__ : Union[str, Any] = np.abs((a - b) ).max()
        self.assertLessEqual(_A , _A , F'Difference between torch and flax is {diff} (>= {tol}).' )

    def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
        __magic_name__ : Optional[Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_model(**_A )

    def __lowerCAmelCase ( self : Any ) -> Any:
        __magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_A )

    def __lowerCAmelCase ( self : List[str] ) -> int:
        __magic_name__ : Optional[Any] = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_A )

    def __lowerCAmelCase ( self : str ) -> Tuple:
        __magic_name__ : str = self.prepare_config_and_inputs()
        self.check_save_load(**_A )

    def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
        __magic_name__ : Optional[int] = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_A )

    @slow
    def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
        # Save/reload a pretrained pair and compare outputs (slow test).
        __magic_name__ , __magic_name__ : Tuple = self.get_pretrained_model_and_inputs()
        __magic_name__ : Tuple = model_a(**_A )
        __magic_name__ : str = outputs[0].numpy()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_A )
            __magic_name__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A )
            __magic_name__ : List[str] = model_a(**_A )
            __magic_name__ : Any = after_outputs[0].numpy()
            __magic_name__ : Tuple = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_A , 1E-5 )
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
    """Dual-encoder test case pairing a tiny ViT vision tower with a tiny BERT text tower.

    NOTE(review): obfuscated — repeated assignment targets mean the locals
    (model, batch_size, pixel_values, ...) read below are never actually bound.
    """

    def __lowerCAmelCase ( self : Dict ) -> str:
        # Build a tiny pretrained ViT+BERT dual encoder and random inputs.
        __magic_name__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
        __magic_name__ : List[str] = 13
        __magic_name__ : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __magic_name__ : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __magic_name__ : Tuple = random_attention_mask([batch_size, 4] )
        __magic_name__ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowerCAmelCase ( self : str , _A : int , _A : int ) -> str:
        # Instantiate the two towers from their configs.
        __magic_name__ : Any = TFViTModel(_A , name='vision_model' )
        __magic_name__ : int = TFBertModel(_A , name='text_model' )
        return vision_model, text_model

    def __lowerCAmelCase ( self : int ) -> int:
        # Combine config/inputs from the ViT and BERT model testers.
        __magic_name__ : Tuple = TFViTModelTester(self )
        __magic_name__ : str = TFBertModelTester(self )
        __magic_name__ : int = vit_model_tester.prepare_config_and_inputs()
        __magic_name__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ : str = vision_config_and_inputs
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) : Optional[Any] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
    """Dual-encoder test case pairing a tiny DeiT vision tower with a tiny RoBERTa text tower.

    NOTE(review): obfuscated — repeated assignment targets mean the locals
    read below are never actually bound.
    """

    def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
        # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
        # just reinitialize it.
        __magic_name__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
        __magic_name__ : Any = 13
        __magic_name__ : List[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __magic_name__ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __magic_name__ : Optional[Any] = random_attention_mask([batch_size, 4] )
        __magic_name__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowerCAmelCase ( self : str , _A : int , _A : str , _A : Optional[Any] , _A : Optional[Any] , _A : str=None , **_A : List[Any] ) -> Dict:
        # Overridden: DeiT's sequence length differs from ViT's (see below).
        __magic_name__ , __magic_name__ : Any = self.get_vision_text_model(_A , _A )
        __magic_name__ : Dict = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
        __magic_name__ : List[str] = model(
            input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
        __magic_name__ : List[str] = output.vision_model_output.attentions
        self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
        # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        __magic_name__ : str = to_atuple(vision_model.config.image_size )
        __magic_name__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
        __magic_name__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __magic_name__ : Dict = num_patches + 2
        self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
        __magic_name__ : Optional[Any] = output.text_model_output.attentions
        self.assertEqual(len(_A ) , text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )

    def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : List[str] ) -> int:
        # Instantiate the two towers from their configs.
        __magic_name__ : Dict = TFDeiTModel(_A , name='vision_model' )
        __magic_name__ : str = TFRobertaModel(_A , name='text_model' )
        return vision_model, text_model

    def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
        # Combine config/inputs from the DeiT and RoBERTa model testers.
        __magic_name__ : Optional[Any] = TFDeiTModelTester(self )
        __magic_name__ : List[Any] = TFRobertaModelTester(self )
        __magic_name__ : Any = vit_model_tester.prepare_config_and_inputs()
        __magic_name__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ , __magic_name__ : Tuple = vision_config_and_inputs
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) : List[Any] = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
    """Dual-encoder test case pairing a tiny CLIP vision tower with a tiny BERT text tower.

    NOTE(review): obfuscated — repeated assignment targets mean the locals
    read below are never actually bound.
    """

    def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
        # Build a tiny pretrained CLIP+BERT dual encoder and random inputs.
        __magic_name__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
            'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
        __magic_name__ : Tuple = 13
        __magic_name__ : Optional[Any] = floats_tensor(
            [
                batch_size,
                model.vision_model.config.num_channels,
                model.vision_model.config.image_size,
                model.vision_model.config.image_size,
            ] )
        __magic_name__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
        __magic_name__ : Optional[int] = random_attention_mask([batch_size, 4] )
        __magic_name__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
        return model, inputs

    def __lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[int] ) -> int:
        # Instantiate the two towers from their configs.
        __magic_name__ : Any = TFCLIPVisionModel(_A , name='vision_model' )
        __magic_name__ : Dict = TFBertModel(_A , name='text_model' )
        return vision_model, text_model

    def __lowerCAmelCase ( self : Any ) -> List[str]:
        # Combine config/inputs from the CLIP and BERT model testers.
        __magic_name__ : int = TFCLIPVisionModelTester(self )
        __magic_name__ : Dict = TFBertModelTester(self )
        __magic_name__ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
        __magic_name__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
        __magic_name__ , __magic_name__ : int = vision_config_and_inputs
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) : Any = text_config_and_inputs
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": input_mask,
            "input_ids": input_ids,
            "text_token_type_ids": token_type_ids,
            "text_sequence_labels": sequence_labels,
            "text_token_labels": token_labels,
            "text_choice_labels": choice_labels,
        }
@require_vision
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
    """Slow integration test against the public `clip-italian/clip-italian` checkpoint."""

    @slow
    def __lowerCAmelCase ( self : List[Any] ) -> str:
        # Load PyTorch weights into the TF dual encoder (from_pt), run one
        # image/text batch and check logits shapes and reference values.
        __magic_name__ : Any = TFVisionTextDualEncoderModel.from_pretrained(
            'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_A )
        __magic_name__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
        __magic_name__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        __magic_name__ : str = processor(
            text=['una foto di un gatto', 'una foto di un cane'] , images=_A , padding=_A , return_tensors='np' )
        __magic_name__ : int = model(**_A )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
        # Reference logits recorded from the original Flax model.
        __magic_name__ : Optional[int] = np.array([[1.228_4727, 0.310_4122]] )
        self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) )
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
    """Unit tests for `transformers.generation.DisjunctiveConstraint`.

    NOTE(review): obfuscated — locals such as ``dc``/``stepped``/``completed``
    are read but never bound (every assignment targets ``__magic_name__``),
    and ``_A`` is undefined at this scope.
    """

    def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        __magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
        __magic_name__ : Dict = DisjunctiveConstraint(_A )
        self.assertTrue(isinstance(dc.token_ids , _A ) )
        # Tensor inputs must be rejected.
        with self.assertRaises(_A ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(_A ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )

    def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        __magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(_A ):
            DisjunctiveConstraint(_A )  # fails here

    def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
        # Step through [1, 2, 3]; the constraint completes on the last token.
        __magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
        __magic_name__ : List[Any] = DisjunctiveConstraint(_A )
        __magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
        __magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
        self.assertTrue(_A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
        __magic_name__ : List[Any] = stepped is True and completed is False and reset is False
        self.assertTrue(_A )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
        __magic_name__ : Any = stepped is True and completed is True and reset is False
        self.assertTrue(_A )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )

    def __lowerCAmelCase ( self : List[Any] ) -> Dict:
        # Walk one branch to completion, reset, then complete a second branch.
        __magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        __magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
        __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        __magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        __magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Obfuscated TFResNet model tester: builds configs and random inputs for the tests below.

    NOTE(review): every ``__init__`` assignment targets the same obfuscated
    name, so attributes such as ``self.batch_size``/``self.num_labels`` read
    later are never actually set.
    """

    def __init__( self : str , _a : List[Any] , _a : str=3 , _a : int=32 , _a : str=3 , _a : int=10 , _a : str=[10, 20, 30, 40] , _a : int=[1, 1, 2, 1] , _a : Tuple=True , _a : Dict=True , _a : Dict="relu" , _a : List[str]=3 , _a : Dict=None , ) -> List[Any]:
        # NOTE(review): duplicate parameter names `_a` — invalid as written.
        _SCREAMING_SNAKE_CASE =parent
        _SCREAMING_SNAKE_CASE =batch_size
        _SCREAMING_SNAKE_CASE =image_size
        _SCREAMING_SNAKE_CASE =num_channels
        _SCREAMING_SNAKE_CASE =embeddings_size
        _SCREAMING_SNAKE_CASE =hidden_sizes
        _SCREAMING_SNAKE_CASE =depths
        _SCREAMING_SNAKE_CASE =is_training
        _SCREAMING_SNAKE_CASE =use_labels
        _SCREAMING_SNAKE_CASE =hidden_act
        _SCREAMING_SNAKE_CASE =num_labels
        _SCREAMING_SNAKE_CASE =scope
        _SCREAMING_SNAKE_CASE =len(_a )

    def A ( self : Union[str, Any] ) -> Optional[Any]:
        '''Build random pixel values, optional labels, and a config.'''
        _SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _SCREAMING_SNAKE_CASE =None
        if self.use_labels:
            _SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] , self.num_labels )
        _SCREAMING_SNAKE_CASE =self.get_config()
        return config, pixel_values, labels

    def A ( self : Dict ) -> Union[str, Any]:
        '''Build a ResNetConfig from the tester's attributes.'''
        return ResNetConfig(
            num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )

    def A ( self : Any , _a : List[str] , _a : int , _a : int ) -> List[Any]:
        '''Run TFResNetModel and check the last hidden state shape.'''
        _SCREAMING_SNAKE_CASE =TFResNetModel(config=_a )
        _SCREAMING_SNAKE_CASE =model(_a )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def A ( self : List[Any] , _a : Any , _a : Union[str, Any] , _a : Union[str, Any] ) -> str:
        '''Run TFResNetForImageClassification and check the logits shape.'''
        _SCREAMING_SNAKE_CASE =self.num_labels
        _SCREAMING_SNAKE_CASE =TFResNetForImageClassification(_a )
        _SCREAMING_SNAKE_CASE =model(_a , labels=_a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A ( self : Union[str, Any] ) -> Optional[int]:
        '''Return (config, inputs_dict) for the common test mixin.'''
        _SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =config_and_inputs
        _SCREAMING_SNAKE_CASE ={'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class A__ ( A__ , A__ , unittest.TestCase ):
    """Obfuscated TF-ResNet model test suite (model + image-classification heads).

    NOTE(review): the class attributes below were all renamed to ``A__`` and
    the methods to ``A`` — later definitions shadow earlier ones, so only the
    last attribute/method of each name survives as written.
    """

    A__ = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
    A__ = (
        {'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
        if is_tf_available()
        else {}
    )
    A__ = False
    A__ = False
    A__ = False
    A__ = False
    A__ = False

    def A ( self : Any ) -> Union[str, Any]:
        '''Set up the model tester and the config tester.'''
        _SCREAMING_SNAKE_CASE =TFResNetModelTester(self )
        _SCREAMING_SNAKE_CASE =ConfigTester(self , config_class=_a , has_text_modality=_a )

    def A ( self : List[Any] ) -> Any:
        '''Exercise the standard config serialisation round-trips.'''
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def A ( self : List[str] ) -> Optional[int]:
        # Intentionally empty placeholder.
        return

    @unittest.skip(reason='ResNet does not use inputs_embeds' )
    def A ( self : Optional[Any] ) -> int:
        pass

    @unittest.skip(reason='ResNet does not support input and output embeddings' )
    def A ( self : Optional[int] ) -> Optional[Any]:
        pass

    def A ( self : Union[str, Any] ) -> Optional[Any]:
        '''Check the forward signature starts with `pixel_values`.'''
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _SCREAMING_SNAKE_CASE =model_class(_a )
            _SCREAMING_SNAKE_CASE =inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
            _SCREAMING_SNAKE_CASE =['pixel_values']
            self.assertListEqual(arg_names[:1] , _a )

    def A ( self : Union[str, Any] ) -> List[str]:
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_a )

    def A ( self : Union[str, Any] ) -> Optional[Any]:
        '''Check hidden-state counts/shapes for both residual layer types.'''
        def check_hidden_states_output(_a : List[str] , _a : List[Any] , _a : Dict ):
            _SCREAMING_SNAKE_CASE =model_class(_a )
            _SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(_a , _a ) )
            _SCREAMING_SNAKE_CASE =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            _SCREAMING_SNAKE_CASE =self.model_tester.num_stages
            self.assertEqual(len(_a ) , expected_num_stages + 1 )
            # ResNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
        _SCREAMING_SNAKE_CASE =['basic', 'bottleneck']
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                _SCREAMING_SNAKE_CASE =layer_type
                _SCREAMING_SNAKE_CASE =True
                check_hidden_states_output(_a , _a , _a )
                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                _SCREAMING_SNAKE_CASE =True
                check_hidden_states_output(_a , _a , _a )

    def A ( self : Union[str, Any] ) -> Dict:
        _SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_a )

    @slow
    def A ( self : Optional[Any] ) -> Dict:
        '''Load the first archived checkpoint end-to-end (slow test).'''
        for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _SCREAMING_SNAKE_CASE =TFResNetModel.from_pretrained(_a )
            self.assertIsNotNone(_a )
def _lowerCAmelCase ( ) -> "Image.Image":
    """Load the standard COCO sample image used by the integration tests below."""
    # The original annotated this as -> int, but it returns a PIL image.
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Restore the conventional name the integration test class calls.
prepare_img = _lowerCAmelCase
@require_tf
@require_vision
class A__ ( unittest.TestCase ):
    """Integration tests running the pretrained TF ResNet checkpoint end to end."""

    @cached_property
    def default_image_processor( self ):
        """Image processor for the checkpoint, or None when vision deps are missing."""
        return (
            AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        """Verify logits shape and first values on the COCO sample image."""
        model = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-11.1069, -9.7877, -8.3777] )
        self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , expected_slice , atol=1e-4 ) )
| 114 |
'''simple docstring'''
from math import factorial
def _lowerCAmelCase ( successes : int , trials : int , prob : float ) -> float:
    """Return the binomial probability of exactly ``successes`` successes in
    ``trials`` independent trials that each succeed with probability ``prob``.

    Raises:
        ValueError: if successes > trials, if either count is negative or not
            an integer, or if prob is not strictly between 0 and 1.
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if not isinstance(successes , int ) or not isinstance(trials , int ):
        raise ValueError('the function is defined for non-negative integers' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient


# Restore the conventional public name used by the __main__ block below.
binomial_distribution = _lowerCAmelCase
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Fixed: "trails" -> "trials", and call the function by the name it is
    # actually bound to in this module (the previous call name was undefined).
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(_lowerCAmelCase(2, 4, 0.75))
| 114 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Make framework ops deterministic so pipeline image outputs are reproducible.
enable_full_determinism()
class A ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for StableUnCLIPPipeline built from tiny dummy components.

    NOTE(review): base classes and all local names were obfuscated; the mixin
    order is restored from the upstream diffusers test — confirm against it.
    """

    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False

    def get_dummy_components(self):
        """Build tiny prior + denoiser components for a fast end-to-end pipeline."""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2,
            attention_head_dim=12,
            embedding_dim=embedder_projection_dim,
            num_layers=1,
        )
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=5.0,
            beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0,
                eos_token_id=2,
                hidden_size=embedder_hidden_size,
                projection_dim=32,
                intermediate_size=37,
                layer_norm_eps=1e-05,
                num_attention_heads=4,
                num_hidden_layers=5,
                pad_token_id=1,
                vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64),
            attention_head_dim=(2, 4),
            class_embed_type="projection",
            # `class_embed_type="projection"` consumes the projected CLIP embedding
            # concatenated with the noise-level embedding, hence the * 2.
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size,
            layers_per_block=1,
            upcast_attention=True,
            use_linear_projection=True,
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear",
            beta_start=0.00085,
            beta_end=0.012,
            prediction_type="v_prediction",
            set_alpha_to_one=False,
            steps_offset=1,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic prompt + generator inputs for the dummy pipeline."""
        if str(device).startswith("mps"):
            # mps does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        """Attention slicing must not change outputs (exact check only on CPU)."""
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        """Batched inference must match single-sample inference."""
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
    """GPU integration tests for the pretrained Stable unCLIP checkpoint."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        """Compare a generated image against the reference numpy artifact."""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu" ).manual_seed(0 )
        output = pipe("anime turle" , generator=generator , output_type="np" )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )

    def test_stable_unclip_pipeline_with_sequential_cpu_offload(self):
        """Peak VRAM with sequential CPU offload must stay under 7 GB."""
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.float16 )
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make framework ops deterministic so pipeline image outputs are reproducible.
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
    """Fast tests for the Karras VE pipeline using a tiny randomly-seeded UNet."""

    @property
    def dummy_uncond_unet( self ):
        """A small unconditional UNet with deterministic initialization."""
        torch.manual_seed(0 )
        model = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
        return model

    def test_inference( self ):
        """Dict and tuple outputs must agree and match the expected corner slice."""
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=unet , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' ).images

        generator = torch.manual_seed(0 )
        image_from_tuple = pipe(num_inference_steps=2 , generator=generator , output_type='''numpy''' , return_dict=False )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class lowercase_ ( unittest.TestCase ):
    """Slow integration test for Karras VE on the pretrained NCSN++ checkpoint."""

    def test_karras_ve_pipeline( self ):
        model_id = '''google/ncsnpp-celebahq-256'''
        model = UNetaDModel.from_pretrained(model_id )
        scheduler = KarrasVeScheduler()
        pipe = KarrasVePipeline(unet=model , scheduler=scheduler )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )

        generator = torch.manual_seed(0 )
        image = pipe(num_inference_steps=20 , generator=generator , output_type='''numpy''' ).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 338 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class UpperCamelCase_ ( ABC ):
    """Abstract base class for dataset readers that load from paths.

    Stores the reader configuration on the instance; concrete subclasses
    implement the abstract read method.
    """

    def __init__(
        self,
        path_or_paths: "Optional[NestedDataStructureLike[PathLike]]" = None,
        split: "Optional[NamedSplit]" = None,
        features: "Optional[Features]" = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: "Optional[int]" = None,
        **kwargs,
    ) -> None:
        self.path_or_paths = path_or_paths
        # Default to "train" unless a split was given or the paths are already a
        # mapping of split name -> paths.
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def UpperCAmelCase_(self) -> "Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]":
        """Read and return the dataset (implemented by concrete readers)."""
        pass
class UpperCamelCase_ ( ABC ):
    """Abstract base class for dataset readers that load from an input stream.

    NOTE(review): this class shadows the reader class above because the
    obfuscation gave both the same name; the stream variant has no paths/split.
    """

    def __init__(
        self,
        features: "Optional[Features]" = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: "Optional[int]" = None,
        **kwargs,
    ) -> None:
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def UpperCAmelCase_(self) -> "Union[Dataset, IterableDataset]":
        """Read and return the dataset (implemented by concrete readers)."""
        pass
| 359 |
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    """Print the shortest distance from ``src`` to every vertex."""
    print(f"""Vertex\tShortest Distance from vertex {src}""")
    for i, d in enumerate(distance):
        print(f"""{i}\t\t{d}""")


def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int) -> bool:
    """Return True if any edge can still be relaxed (i.e. a negative cycle exists)."""
    for j in range(edge_count):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf") and distance[u] + w < distance[v]:
            return True
    return False


def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    """Return shortest distances from ``src`` using Bellman-Ford.

    Raises:
        Exception: if the graph contains a negative-weight cycle.
    """
    distance = [float("inf")] * vertex_count
    distance[src] = 0.0

    # Relax every edge |V| - 1 times.
    for _ in range(vertex_count - 1):
        for j in range(edge_count):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf") and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w

    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count)
    if negative_cycle_exists:
        raise Exception("Negative cycle found")

    return distance


# Backward-compatible binding for the obfuscated name this module used to expose
# (the last of the three colliding definitions was the Bellman-Ford solver).
UpperCAmelCase__ = bellman_ford


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    V = int(input("Enter number of vertices: ").strip())
    E = int(input("Enter number of edges: ").strip())

    graph: list[dict[str, int]] = [{} for _ in range(E)]

    for i in range(E):
        print("Edge ", i + 1)
        src, dest, weight = (
            int(x)
            for x in input("Enter source, destination, weight: ").strip().split(" ")
        )
        graph[i] = {"src": src, "dst": dest, "weight": weight}

    source = int(input("\nEnter shortest path source:").strip())
    shortest_distance = bellman_ford(graph, V, E, source)
    print_distance(shortest_distance, 0)
| 195 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> public names it provides.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling code is only importable when torch is installed.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    """Markov chain over named nodes with explicit transition probabilities."""

    def __init__(self):
        # node -> {destination node -> transition probability}
        self.connections = {}

    def add_node(self, node: str) -> None:
        """Register ``node`` with no outgoing transitions yet."""
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        """Add a transition node1 -> node2 with the given probability."""
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list:
        """Return all registered node names."""
        return list(self.connections)

    def transition(self, node: str) -> str:
        """Sample the next node from ``node``'s transition distribution.

        Returns "" if the outgoing probabilities do not cover the sampled value.
        """
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


# Backward-compatible binding for the obfuscated class name.
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted


def snake_case_(start: str, transitions: list, steps: int):
    """Run ``steps`` transitions from ``start`` and return visit counts per node.

    Each node starts with a count of 1 (from the Counter initialization).
    """
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 93 | 1 |
'''simple docstring'''
import qiskit
def __lowerCAmelCase (qubits , classical_bits ):
    """Apply X (NOT) to qubits 0 and 1, measure them, and return the counts.

    NOTE(review): despite the "single qubit" naming below, the circuit flips
    and measures two qubits — confirm the intended experiment.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator" )
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits , classical_bits )
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0 )
    circuit.x(1 )
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1] , [0, 1] )
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit , simulator , shots=1_000 )
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit )


# Restore the name the __main__ block calls.
single_qubit_measure = __lowerCAmelCase

if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F'''Total count for various states are: {counts}''')
| 369 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): the obfuscation reused `lowerCamelCase__` for both the logger and
# the archive map below, so this logger binding is immediately overwritten.
lowerCamelCase__ = logging.get_logger(__name__)
# Checkpoint name -> hosted config.json URL (pretrained config archive map).
lowerCamelCase__ = {
    'facebook/s2t-wav2vec2-large-en-de': (
        'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json'
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class lowerCAmelCase__ ( PretrainedConfig ):
    """Configuration for the Speech2Text2 decoder.

    Defaults reproduce a small decoder-only architecture; attribute_map lets
    generic code read num_attention_heads / hidden_size transparently.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 322 | 0 |
"""simple docstring"""
def fibonacci(n):
    """Return the n-th term of the sequence [0, 1, 1, 2, ...].

    Per the original code, n == 1 and non-integer inputs return 0.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci term containing ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    """Project Euler 25: index of the first Fibonacci term with ``n`` digits."""
    return fibonacci_digits_index(n)


# Backward-compatible binding for the obfuscated name (the last of the three
# colliding definitions was the solution entry point).
__lowerCAmelCase = solution


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (number ):
    """Return the minimum count of perfect squares summing to ``number``
    (Lagrange decomposition via dynamic programming).

    Per the original code, 0 returns 1.

    Raises:
        ValueError: for non-integral or negative input.
    """
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]


# Public, star-importable alias for the obfuscated name.
num_squares = __lowerCAmelCase
if __name__ == "__main__":
    import doctest

    # Dataset-dump residue ("| 86 | 1 |") removed from this line; it made the
    # module unparseable.
    doctest.testmod()
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    """Output of the Flax ControlNet forward pass.

    Restores the class name referenced by the model's __call__ below; the
    obfuscated version self-inherited (``class A__(A__)``), which cannot work.
    """

    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    """Embeds the full-resolution conditioning image into the UNet's feature space.

    A stack of strided convolutions downsamples the conditioning image by 8x;
    the final zero-initialized convolution makes the ControlNet a no-op at the
    start of training.
    """

    conditioning_embedding_channels: int
    block_out_channels: Tuple[int, ...] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32

    def setup(self) -> None:
        self.conv_in = nn.Conv(
            self.block_out_channels[0],
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(
                channel_in,
                kernel_size=(3, 3),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv1)
            # Strided conv halves the spatial resolution.
            conv2 = nn.Conv(
                channel_out,
                kernel_size=(3, 3),
                strides=(2, 2),
                padding=((1, 1), (1, 1)),
                dtype=self.dtype,
            )
            blocks.append(conv2)
        self.blocks = blocks

        # Zero-initialized so the embedding initially contributes nothing.
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels,
            kernel_size=(3, 3),
            padding=((1, 1), (1, 1)),
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)
        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)
        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""Flax ControlNet model.

    Mirrors the conditional UNet's down and mid blocks and returns
    per-resolution residuals (plus a mid-block residual) that steer a diffusion
    UNet. Inputs are channels-first; the conditioning image is expected at 8x
    the latent resolution.
    """

    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str, ...] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool, ...]] = False
    block_out_channels: Tuple[int, ...] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int, ...]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int, ...]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int, ...] = (16, 32, 96, 256)

    def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict:
        """Initialize parameters by tracing the module with dummy inputs."""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]

    def setup(self) -> None:
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This corrects a historical
        # misnaming; see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(
            block_out_channels[0],
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift
        )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0],
            block_out_channels=self.conditioning_embedding_out_channels,
        )

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)
        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        # All controlnet projection convs are zero-initialized (no-op at init).
        controlnet_block = nn.Conv(
            output_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    num_attention_heads=num_attention_heads[i],
                    add_downsample=not is_final_block,
                    use_linear_projection=self.use_linear_projection,
                    only_cross_attention=only_cross_attention[i],
                    dtype=self.dtype,
                )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel,
                    out_channels=output_channel,
                    dropout=self.dropout,
                    num_layers=self.layers_per_block,
                    add_downsample=not is_final_block,
                    dtype=self.dtype,
                )
            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel,
                    kernel_size=(1, 1),
                    padding='VALID',
                    kernel_init=nn.initializers.zeros_init(),
                    bias_init=nn.initializers.zeros_init(),
                    dtype=self.dtype,
                )
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel,
            dropout=self.dropout,
            num_attention_heads=num_attention_heads[-1],
            use_linear_projection=self.use_linear_projection,
            dtype=self.dtype,
        )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel,
            kernel_size=(1, 1),
            padding='VALID',
            kernel_init=nn.initializers.zeros_init(),
            bias_init=nn.initializers.zeros_init(),
            dtype=self.dtype,
        )

    def __call__(
        self,
        sample,
        timesteps,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale: float = 1.0,
        return_dict: bool = True,
        train: bool = False,
    ) -> Union[FlaxControlNetOutput, Tuple]:
        """Run the ControlNet and return scaled down/mid residuals."""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process (convert channels-first inputs to NHWC for Flax convs)
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlockaD):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )


# Backward-compatible binding for the obfuscated class name.
A__ = FlaxControlNetModel
| 365 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def _lowerCAmelCase ( number : int ) -> bool:
    """Return True if ``number`` is a perfect square."""
    sq = int(number**0.5 )
    return number == sq * sq


# Restore the name by which the solver below calls this helper.
is_sq = _lowerCAmelCase
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> tuple[int, int]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
_SCREAMING_SNAKE_CASE =x_den * y_den * z_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def _lowerCAmelCase ( _UpperCamelCase : int = 35 ) -> int:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =set()
_SCREAMING_SNAKE_CASE =42
_SCREAMING_SNAKE_CASE =Fraction(0 )
_SCREAMING_SNAKE_CASE =42
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
_SCREAMING_SNAKE_CASE =x_num * y_den + x_den * y_num
_SCREAMING_SNAKE_CASE =x_den * y_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
_SCREAMING_SNAKE_CASE =(
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
_SCREAMING_SNAKE_CASE =x_den * x_den * y_den * y_den
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=-1
_SCREAMING_SNAKE_CASE =x_num * y_num
_SCREAMING_SNAKE_CASE =x_den * y_num + x_num * y_den
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
# n=2
_SCREAMING_SNAKE_CASE =x_num * x_num * y_num * y_num
_SCREAMING_SNAKE_CASE =(
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_UpperCamelCase ) and is_sq(_UpperCamelCase ):
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =int(sqrt(_UpperCamelCase ) )
_SCREAMING_SNAKE_CASE =gcd(_UpperCamelCase , _UpperCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
_SCREAMING_SNAKE_CASE =add_three(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
unique_s.add(_UpperCamelCase )
for num, den in unique_s:
total += Fraction(_UpperCamelCase , _UpperCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
| 114 | 0 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
# Torch is an optional dependency of transformers; import it only when it is
# actually installed so that collecting these (torch-gated) tests cannot fail.
if is_torch_available():
    import torch
@require_torch
@require_optimum
@slow
class snake_case__ ( unittest.TestCase ):
    """Integration tests for the Optimum BetterTransformer conversion API on a
    tiny random T5 checkpoint.

    NOTE(review): both test methods share the name ``lowercase_`` (an
    obfuscation artifact), so the second definition shadows the first and
    only one test is ever collected; they should get distinct ``test_*``
    names. The pervasive undefined ``__UpperCAmelCase`` references of the
    original are replaced below with proper locals.
    """

    def lowercase_ ( self : Optional[int] ) ->Tuple:
        """Convert to BetterTransformer, revert, save, reload, and check that
        generation output is unchanged end to end."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id )
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        inp = tokenizer('This is me', return_tensors='pt' )

        model = model.to_bettertransformer()
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
        output = model.generate(**inp )

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules() ) )

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname )
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname )
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
            output_from_pretrained = model_reloaded.generate(**inp )
            self.assertTrue(torch.allclose(output, output_from_pretrained ) )

    def lowercase_ ( self : Dict ) ->Tuple:
        """Saving a converted model must fail until it is reverted."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id )
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # save_pretrained refuses to serialize a transformed model.
            # NOTE(review): the original asserted an obfuscated exception name;
            # upstream expects ValueError — confirm against optimum.
            with self.assertRaises(ValueError ):
                model.save_pretrained(tmpdirname )
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname )
| 277 | from __future__ import annotations
class A :
    """A 2-D matrix of ints/floats with linear-algebra helpers: transpose,
    determinant, minors/cofactors, adjugate, inverse, and the arithmetic
    operators (+, -, *, **, unary -).

    NOTE(review): this file's identifiers were mechanically obfuscated —
    locals collapsed to ``UpperCAmelCase__``, most methods share the name
    ``lowercase_`` (later definitions shadow earlier ones), and some methods
    declare duplicate parameter names (a SyntaxError). Several bodies still
    read the original names (``rows``, ``cols``, ``error``, ``Matrix``,
    ``self.num_rows`` ...) which are now unbound. The code is left
    byte-identical; docstrings describe the evident intent of each method.
    """
    def __init__(self : Union[str, Any] , __UpperCAmelCase : list[list[int]] ) -> List[str]:
        """Validate the given grid as a rectangular list of int/float rows and
        store it; an empty argument yields an empty matrix."""
        UpperCAmelCase__ = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float." )
        if len(__UpperCAmelCase ) != 0:
            UpperCAmelCase__ = len(rows[0] )  # NOTE(review): `rows` is unbound here
            if cols == 0:
                raise error
            for row in rows:
                if len(__UpperCAmelCase ) != cols:
                    raise error
                for value in row:
                    if not isinstance(__UpperCAmelCase , (int, float) ):
                        raise error
            UpperCAmelCase__ = rows
        else:
            UpperCAmelCase__ = []
    def lowercase_ (self : Any ) -> list[list[int]]:
        """Return the columns of the matrix (i.e. its transpose as a list)."""
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0] ) )]
    @property
    def lowercase_ (self : Any ) -> int:
        """Number of rows (intended name: ``num_rows``)."""
        return len(self.rows )
    @property
    def lowercase_ (self : Union[str, Any] ) -> int:
        """Number of columns (intended name: ``num_columns``)."""
        return len(self.rows[0] )
    @property
    def lowercase_ (self : List[Any] ) -> tuple[int, int]:
        """(rows, columns) shape of the matrix (intended name: ``order``)."""
        return (self.num_rows, self.num_columns)
    @property
    def lowercase_ (self : Tuple ) -> bool:
        """True when the matrix is square (intended name: ``is_square``)."""
        return self.order[0] == self.order[1]
    def lowercase_ (self : Any ) -> Matrix:
        """Return the identity matrix of the same order."""
        UpperCAmelCase__ = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows )]
            for row_num in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )
    def lowercase_ (self : int ) -> int:
        """Determinant: 0 for non-square matrices, direct formulas up to 2x2,
        Laplace expansion along the first row otherwise."""
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0] )
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0]) )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns ) )
    def lowercase_ (self : Tuple ) -> bool:
        """True when the determinant is non-zero (matrix is invertible)."""
        return bool(self.determinant() )
    def lowercase_ (self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
        """Minor: determinant of the submatrix obtained by deleting the given
        row and column."""
        UpperCAmelCase__ = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns )
                if other_column != column
            ]
            for other_row in range(self.num_rows )
            if other_row != row
        ]
        return Matrix(__UpperCAmelCase ).determinant()
    def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> int:
        """Cofactor: the minor with the checkerboard sign (-1)**(row+column)."""
        if (row + column) % 2 == 0:
            return self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
        return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase )
    def lowercase_ (self : Union[str, Any] ) -> Matrix:
        """Matrix of minors."""
        return Matrix(
            [
                [self.get_minor(__UpperCAmelCase , __UpperCAmelCase ) for column in range(self.num_columns )]
                for row in range(self.num_rows )
            ] )
    def lowercase_ (self : List[str] ) -> Matrix:
        """Matrix of cofactors (minors with alternating signs applied)."""
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns )
                ]
                for row in range(self.minors().num_rows )
            ] )
    def lowercase_ (self : Optional[Any] ) -> Matrix:
        """Adjugate: transpose of the cofactor matrix."""
        UpperCAmelCase__ = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns )]
            for row in range(self.num_rows )
        ]
        return Matrix(__UpperCAmelCase )
    def lowercase_ (self : List[Any] ) -> Matrix:
        """Inverse via adjugate/determinant; TypeError when determinant is 0."""
        UpperCAmelCase__ = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse" )
        return self.adjugate() * (1 / determinant)
    def __repr__(self : Dict ) -> str:
        """Debug representation: the raw row data."""
        return str(self.rows )
    def __str__(self : Optional[Any] ) -> str:
        """Human-readable bracketed rendering of the rows."""
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(__UpperCAmelCase ) for value in row] ) + ".]"
                    for row in self.rows
                ] )
            + "]"
        )
    def lowercase_ (self : Optional[int] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
        """Append (or insert at *position*) a row after validating its type
        and length against the existing rows."""
        UpperCAmelCase__ = TypeError("Row must be a list containing all ints and/or floats" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise type_error
        for value in row:
            if not isinstance(__UpperCAmelCase , (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix" )
        if position is None:
            self.rows.append(__UpperCAmelCase )
        else:
            UpperCAmelCase__ = self.rows[0:position] + [row] + self.rows[position:]
    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : list[int] , __UpperCAmelCase : int | None = None ) -> None:
        """Append (or insert at *position*) a column after validating its type
        and length against the existing columns."""
        UpperCAmelCase__ = TypeError(
            "Column must be a list containing all ints and/or floats" )
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise type_error
        for value in column:
            if not isinstance(__UpperCAmelCase , (int, float) ):
                raise type_error
        if len(__UpperCAmelCase ) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix" )
        if position is None:
            UpperCAmelCase__ = [self.rows[i] + [column[i]] for i in range(self.num_rows )]
        else:
            UpperCAmelCase__ = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows )
            ]
    def __eq__(self : Any , __UpperCAmelCase : object ) -> bool:
        """Matrices are equal when their row data is equal."""
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self : int , __UpperCAmelCase : object ) -> bool:
        """Inverse of ``__eq__``."""
        return not self == other
    def __neg__(self : Dict ) -> Matrix:
        """Unary minus: every element negated (scalar multiply by -1)."""
        return self * -1
    def __add__(self : Dict , __UpperCAmelCase : Matrix ) -> Matrix:
        """Element-wise addition; both operands must share the same order."""
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order" )
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __sub__(self : Optional[Any] , __UpperCAmelCase : Matrix ) -> Matrix:
        """Element-wise subtraction; both operands must share the same order."""
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order" )
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
                for i in range(self.num_rows )
            ] )
    def __mul__(self : Tuple , __UpperCAmelCase : Matrix | int | float ) -> Matrix:
        """Scalar multiplication (int/float) or matrix product (Matrix)."""
        if isinstance(__UpperCAmelCase , (int, float) ):
            return Matrix(
                [[int(element * other ) for element in row] for row in self.rows] )
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second" )
            return Matrix(
                [
                    [Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase ) for column in other.columns()]
                    for row in self.rows
                ] )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix" )
    def __pow__(self : List[Any] , __UpperCAmelCase : int ) -> Matrix:
        """Integer power by repeated multiplication; exponent 0 yields the
        identity, negative exponents use the inverse (when it exists)."""
        if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            raise TypeError("A Matrix can only be raised to the power of an int" )
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power" )
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertable matrices can be raised to a negative power" )
        UpperCAmelCase__ = self
        for _ in range(other - 1 ):
            result *= self
        return result
    @classmethod
    def lowercase_ (cls : Dict , __UpperCAmelCase : list[int] , __UpperCAmelCase : list[int] ) -> int:
        """Dot product of a row and a column of equal length."""
        return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase ) ) )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 65 | 0 |
def A ( _lowercase = 600_851_475_143 ):
    """Project Euler 3: return the largest prime factor of ``_lowercase``.

    Accepts anything castable to int; raises TypeError otherwise and
    ValueError for values below one.

    NOTE(review): the obfuscated original assigned to throwaway names while
    reading ``n``/``ans`` (NameError) and returned the wrong variable; the
    classic trial-division implementation is restored.
    """
    try:
        n = int(_lowercase )
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.' )
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.' )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        # Find the smallest remaining divisor; it is necessarily prime.
        while n % i != 0:
            i += 1
        ans = i
        # Divide out every power of that prime before moving on.
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )


if __name__ == "__main__":
    # The original guard called the undefined name `solution`.
    print(f"""{A() = }""")
| 258 | from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( src_path , map_location = "cpu" , save_path = None ):
    """Convert every tensor in a saved state dict to fp16, in place on disk.

    src_path: path to a ``torch.save``-d flat state dict (e.g. pytorch_model.bin).
    map_location: device argument forwarded to ``torch.load``.
    save_path: output path; defaults to overwriting ``src_path``.

    Raises TypeError if any value in the file is not a ``torch.Tensor``.

    NOTE(review): the obfuscated original declared three parameters with the
    same name (a SyntaxError) and passed the wrong variables through; the
    conventional (src_path, map_location, save_path) names are restored.
    """
    state_dict = torch.load(src_path , map_location=map_location )
    for k, v in tqdm(state_dict.items() ):
        if not isinstance(v , torch.Tensor ):
            raise TypeError('FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin' )
        # Halve on-disk storage by casting each tensor to float16.
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict , save_path )


if __name__ == "__main__":
    # The original passed the undefined name `convert`; the function is `A`.
    fire.Fire(A)
| 258 | 1 |
"""simple docstring"""
import re
def UpperCAmelCase ( UpperCAmelCase ) -> str:
    """Return the complementary DNA strand (A<->T, C<->G).

    Raises ValueError when the input contains any character outside "ATCG".

    NOTE(review): the obfuscated original referenced the undefined names
    ``__lowercase`` and ``dna``; both are replaced by the actual parameter.
    """
    # Every character must be a valid nucleotide for the strand to be valid.
    if len(re.findall('[ATCG]' , UpperCAmelCase ) ) != len(UpperCAmelCase ):
        raise ValueError('Invalid Strand' )

    return UpperCAmelCase.translate(UpperCAmelCase.maketrans('ATCG' , 'TAGC' ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 69 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class lowerCAmelCase_ ( BaseOutput ):
    """Output of the Karras-VE scheduler's step functions.

    prev_sample: denoised sample x_{t-1} to feed to the next model call.
    derivative: d(sample)/d(sigma), reused by the second-order correction.
    pred_original_sample: optional estimate of the fully denoised sample x_0.

    NOTE(review): the obfuscated original collapsed all three fields onto a
    single name and inherited from an undefined base; the field names are
    restored from the keyword arguments used when this dataclass is
    constructed later in the file, and the base from the ``BaseOutput``
    import at the top of the file.
    """

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class lowerCAmelCase_ ( SchedulerMixin , ConfigMixin ):
    """Stochastic sampling scheduler after Karras et al. (2022), Algorithm 2
    (arXiv:2206.00364), for variance-expanding ("VE") diffusion models.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and collapsed all six methods onto the single name
    ``_snake_case``. Parameter and method names are restored to match how
    the pipeline elsewhere in this file drives schedulers
    (``set_timesteps`` / ``scale_model_input`` / ``step``) and the imports
    at the top of the file (``SchedulerMixin``, ``ConfigMixin``,
    ``register_to_config``, ``randn_tensor``).
    """

    # Solver order: each step pairs a predictor with a corrector evaluation.
    order = 2

    @register_to_config
    def __init__( self , sigma_min : float = 0.02 , sigma_max : float = 1_00 , s_noise : float = 1.007 , s_churn : float = 80 , s_min : float = 0.05 , s_max : float = 50 , ) -> None:
        """Store the schedule configuration (persisted by @register_to_config).

        sigma_min/sigma_max: bounds of the noise sigma schedule.
        s_noise: extra noise scale counteracting detail loss during sampling.
        s_churn: amount of stochastic "churn" added per step.
        s_min/s_max: sigma range within which churn noise is applied.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values (filled in by set_timesteps)
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input( self , sample : torch.FloatTensor , timestep : Optional[int] = None ) -> torch.FloatTensor:
        """No-op; present so pipelines can treat all schedulers uniformly."""
        return sample

    def set_timesteps( self , num_inference_steps : int , device : Union[str, torch.device] = None ) -> None:
        """Precompute the (descending) discrete timesteps and sigma schedule."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0 , self.num_inference_steps )[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps ).to(device )
        # Geometric interpolation from sigma_max down to sigma_min.
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule , dtype=torch.float32 , device=device )

    def add_noise_to_input( self , sample : torch.FloatTensor , sigma : float , generator : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn": raise sigma to sigma_hat by adding a
        controlled amount of fresh noise to the sample."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape , generator=generator ).to(sample.device )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step( self , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , return_dict : bool = True , ):
        """Euler predictor step propagating the sample from sigma_hat to
        sigma_prev; returns (prev_sample, derivative) or a KarrasVeOutput."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        # NOTE(review): `KarrasVeOutput` is the intended name of the output
        # dataclass defined above (obfuscated in this file) — confirm binding.
        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def step_correct( self , model_output : torch.FloatTensor , sigma_hat : float , sigma_prev : float , sample_hat : torch.FloatTensor , sample_prev : torch.FloatTensor , derivative : torch.FloatTensor , return_dict : bool = True , ):
        """Second-order (Heun) correction: average the predictor derivative
        with one computed at the predicted sample."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev , derivative=derivative , pred_original_sample=pred_original_sample )

    def add_noise( self , original_samples , noise , timesteps ):
        """Not supported by this scheduler."""
        raise NotImplementedError()
| 319 | 0 |
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = "" )-> dict[str, float]:
    """Scrape an IMDb chart page and return a {title: rating} mapping.

    SCREAMING_SNAKE_CASE_: optional chart URL; falls back to the Top-250 page.

    NOTE(review): the obfuscated original read the undefined name ``url``,
    collapsed every intermediate onto one local, and zipped the parameter
    with itself instead of the scraped title/rating cells; proper locals
    are restored.
    """
    url = SCREAMING_SNAKE_CASE_ or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    titles = soup.find_all("td" , attrs="titleColumn" )
    ratings = soup.find_all("td" , class_="ratingColumn imdbRating" )
    return {
        title.a.text: float(rating.strong.text )
        for title, rating in zip(titles , ratings )
    }
def lowerCAmelCase( SCREAMING_SNAKE_CASE_ = "IMDb_Top_250_Movies.csv" )-> None:
    """Write the scraped IMDb Top-250 chart to ``SCREAMING_SNAKE_CASE_`` as a
    two-column CSV (title, rating).

    NOTE(review): name obfuscation broke this function: it calls the
    undefined ``get_imdb_top_aaa_movies`` (the scraper above is now also
    named ``lowerCAmelCase``, which this definition shadows), binds the csv
    writer to a throwaway name while reading ``writer``/``movies``, passes
    the filename instead of the open file handle to ``csv.writer``, and the
    ``__main__`` guard calls the undefined ``write_movies``. Left
    byte-identical pending a decision on the intended names.
    """
    UpperCamelCase_ = get_imdb_top_aaa_movies()
    with open(SCREAMING_SNAKE_CASE_ , "w" , newline="" ) as out_file:
        UpperCamelCase_ = csv.writer(SCREAMING_SNAKE_CASE_ )
        writer.writerow(["Movie title", "IMDb rating"] )
        for title, rating in movies.items():
            writer.writerow([title, rating] )
if __name__ == "__main__":
    write_movies()
| 60 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
# Module-level logger for the pipeline below.
SCREAMING_SNAKE_CASE :List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __magic_name__ ( snake_case ):
    """Speech-to-image pipeline: transcribes audio with a Whisper model and
    feeds the transcription into a Stable Diffusion text-to-image loop.

    NOTE(review): identifier obfuscation left this class broken — every
    parameter of ``__init__`` and ``__call__`` is declared as ``_lowercase``
    (duplicate argument names are a SyntaxError) while the bodies still read
    the original names (``safety_checker``, ``slice_size``, ``prompt``,
    ``latents`` ...). The code is left byte-identical; only documentation
    is added.
    """
    def __init__( self , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , )-> Optional[Any]:
        """Register the Whisper speech model/processor and the Stable Diffusion
        components (vae, text encoder, tokenizer, unet, scheduler, safety
        checker, feature extractor), warning when no safety checker is given."""
        super().__init__()
        if safety_checker is None:
            logger.warning(
                F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
        self.register_modules(
            speech_model=_lowercase , speech_processor=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , feature_extractor=_lowercase , )
    def UpperCAmelCase_ ( self , _lowercase = "auto" )-> str:
        """Enable sliced attention to reduce peak memory; "auto" halves the
        UNet's attention head dimension per slice."""
        if slice_size == "auto":
            UpperCamelCase_ = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(_lowercase )
    def UpperCAmelCase_ ( self )-> Optional[int]:
        """Disable attention slicing (restore full attention).

        NOTE(review): calls ``self.enable_attention_slicing`` with the
        unbound name ``_lowercase``; upstream passes ``None`` — confirm.
        """
        self.enable_attention_slicing(_lowercase )
    @torch.no_grad()
    def __call__( self , _lowercase , _lowercase=16_000 , _lowercase = 512 , _lowercase = 512 , _lowercase = 50 , _lowercase = 7.5 , _lowercase = None , _lowercase = 1 , _lowercase = 0.0 , _lowercase = None , _lowercase = None , _lowercase = "pil" , _lowercase = True , _lowercase = None , _lowercase = 1 , **_lowercase , )-> str:
        """Transcribe the audio input with Whisper, then run the classifier-
        free-guided Stable Diffusion denoising loop on the transcription and
        decode the final latents to images.

        NOTE(review): the duplicate ``_lowercase`` parameters collapse the
        original (audio, sampling_rate, height, width, num_inference_steps,
        guidance_scale, negative_prompt, num_images_per_prompt, eta,
        generator, latents, output_type, return_dict, callback,
        callback_steps) — presumed from the body's reads; confirm upstream.
        """
        UpperCamelCase_ = self.speech_processor.feature_extractor(
            _lowercase , return_tensors="pt" , sampling_rate=_lowercase ).input_features.to(self.device )
        UpperCamelCase_ = self.speech_model.generate(_lowercase , max_length=480_000 )
        UpperCamelCase_ = self.speech_processor.tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase , normalize=_lowercase )[
            0
        ]
        if isinstance(_lowercase , _lowercase ):
            UpperCamelCase_ = 1
        elif isinstance(_lowercase , _lowercase ):
            UpperCamelCase_ = len(_lowercase )
        else:
            raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowercase )}" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(_lowercase , _lowercase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(_lowercase )}." )
        # get prompt text embeddings
        UpperCamelCase_ = self.tokenizer(
            _lowercase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        UpperCamelCase_ = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCamelCase_ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            UpperCamelCase_ = text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCamelCase_ = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = text_embeddings.shape
        UpperCamelCase_ = text_embeddings.repeat(1 , _lowercase , 1 )
        UpperCamelCase_ = text_embeddings.view(bs_embed * num_images_per_prompt , _lowercase , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCamelCase_ = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            UpperCamelCase_ = 42
            if negative_prompt is None:
                UpperCamelCase_ = [""] * batch_size
            elif type(_lowercase ) is not type(_lowercase ):
                raise TypeError(
                    F"`negative_prompt` should be the same type to `prompt`, but got {type(_lowercase )} !="
                    F" {type(_lowercase )}." )
            elif isinstance(_lowercase , _lowercase ):
                UpperCamelCase_ = [negative_prompt]
            elif batch_size != len(_lowercase ):
                raise ValueError(
                    F"`negative_prompt`: {negative_prompt} has batch size {len(_lowercase )}, but `prompt`:"
                    F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`." )
            else:
                UpperCamelCase_ = negative_prompt
            UpperCamelCase_ = text_input_ids.shape[-1]
            UpperCamelCase_ = self.tokenizer(
                _lowercase , padding="max_length" , max_length=_lowercase , truncation=_lowercase , return_tensors="pt" , )
            UpperCamelCase_ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCamelCase_ = uncond_embeddings.shape[1]
            UpperCamelCase_ = uncond_embeddings.repeat(1 , _lowercase , 1 )
            UpperCamelCase_ = uncond_embeddings.view(batch_size * num_images_per_prompt , _lowercase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCamelCase_ = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCamelCase_ = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCamelCase_ = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device="cpu" , dtype=_lowercase ).to(
                    self.device )
            else:
                UpperCamelCase_ = torch.randn(_lowercase , generator=_lowercase , device=self.device , dtype=_lowercase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            UpperCamelCase_ = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(_lowercase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCamelCase_ = self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCamelCase_ = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCamelCase_ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCamelCase_ = {}
        if accepts_eta:
            UpperCamelCase_ = eta
        for i, t in enumerate(self.progress_bar(_lowercase ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCamelCase_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCamelCase_ = self.scheduler.scale_model_input(_lowercase , _lowercase )
            # predict the noise residual
            UpperCamelCase_ = self.unet(_lowercase , _lowercase , encoder_hidden_states=_lowercase ).sample
            # perform guidance
            if do_classifier_free_guidance:
                UpperCamelCase_ , UpperCamelCase_ = noise_pred.chunk(2 )
                UpperCamelCase_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            UpperCamelCase_ = self.scheduler.step(_lowercase , _lowercase , _lowercase , **_lowercase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(_lowercase , _lowercase , _lowercase )
        # 0.18215 is the VAE scaling factor used by Stable Diffusion.
        UpperCamelCase_ = 1 / 0.18_215 * latents
        UpperCamelCase_ = self.vae.decode(_lowercase ).sample
        UpperCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCamelCase_ = self.numpy_to_pil(_lowercase )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=_lowercase , nsfw_content_detected=_lowercase )
| 60 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case ( _lowercase , unittest.TestCase ):
lowerCamelCase__: Tuple = ProphetNetTokenizer
lowerCamelCase__: Tuple = False
def _lowerCamelCase ( self: List[str] ) -> List[str]:
super().setUp()
__UpperCAmelCase : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__UpperCAmelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _lowerCamelCase ( self: str , __lowerCamelCase: List[str] ) -> Tuple:
__UpperCAmelCase : Any = "UNwant\u00E9d,running"
__UpperCAmelCase : Optional[Any] = "unwanted, running"
return input_text, output_text
def _lowerCamelCase ( self: List[Any] ) -> Any:
__UpperCAmelCase : int = self.tokenizer_class(self.vocab_file )
__UpperCAmelCase : Optional[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(__lowerCamelCase , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [9, 6, 7, 12, 10, 11] )
def _lowerCamelCase ( self: Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase : Dict = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def _lowerCamelCase ( self: List[str] ) -> str:
__UpperCAmelCase : List[Any] = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self: List[str] ) -> Dict:
__UpperCAmelCase : int = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def _lowerCamelCase ( self: Union[str, Any] ) -> Optional[int]:
__UpperCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=__lowerCamelCase , strip_accents=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase ( self: Any ) -> Optional[Any]:
__UpperCAmelCase : List[str] = BasicTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def _lowerCamelCase(self: List[Any]) -> Union[str, Any]:
    """Without lower-casing the original capitalisation is preserved."""
    # BUG FIX: the preserved-case expected output implies do_lower_case=False.
    tokenizer = BasicTokenizer(do_lower_case=False)
    self.assertListEqual(
        tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase(self: Any) -> List[str]:
    """No lower-casing and strip_accents=False: case and accents both kept."""
    # BUG FIX: keyword values restored from the "HäLLo" expected output.
    tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase(self: Optional[int]) -> List[str]:
    """No lower-casing but strip_accents=True: case kept, accents removed."""
    # BUG FIX: keyword values restored from the "HaLLo" expected output.
    tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
    self.assertListEqual(
        tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
def _lowerCamelCase(self: Optional[Any]) -> Any:
    """Tokens listed in `never_split` survive as single tokens."""
    # BUG FIX: `do_lower_case` was the undefined `__lowerCamelCase`; the
    # preserved case of "[UNK]"/"HeLLo" implies False.
    tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
    self.assertListEqual(
        tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def _lowerCamelCase(self: Optional[int]) -> Any:
    """WordpieceTokenizer greedily splits into sub-words; unknown pieces map to [UNK]."""
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
    vocab = {}
    # BUG FIX: the token->id mapping was never built — each index was assigned
    # to a throwaway name and the dict passed to the tokenizer stayed empty.
    for i, token in enumerate(vocab_tokens):
        vocab[token] = i
    tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

    self.assertListEqual(tokenizer.tokenize(""), [])
    self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
    self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
@require_torch
def _lowerCamelCase(self: Optional[int]) -> Tuple:
    """Batched encoding returns padded PyTorch tensors with the expected ids/shapes."""
    tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
    src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
    expected_src_tokens = [10_37, 21_46, 2_04_23, 20_05, 76_80, 78_49, 39_89, 10_12, 1_02]
    # BUG FIX: the call previously passed an undefined `__lowerCamelCase` for
    # both the text and the padding flag.
    batch = tokenizer(src_text, padding=True, return_tensors="pt")
    # NOTE(review): the original assertion compared two undefined names; the
    # intended check is presumably `isinstance(batch, BatchEncoding)` — confirm
    # that BatchEncoding is imported at the top of this file.
    self.assertIsInstance(batch, BatchEncoding)
    result = list(batch.input_ids.numpy()[0])
    self.assertListEqual(expected_src_tokens, result)

    self.assertEqual((2, 9), batch.input_ids.shape)
    self.assertEqual((2, 9), batch.attention_mask.shape)
def _lowerCamelCase(self: Tuple) -> Tuple:
    """Whitespace detection covers the ASCII blanks and the no-break space."""
    for whitespace_char in (" ", "\t", "\r", "\n", "\u00A0"):
        self.assertTrue(_is_whitespace(whitespace_char))
    for other_char in ("A", "-"):
        self.assertFalse(_is_whitespace(other_char))
def _lowerCamelCase(self: Any) -> Dict:
    """Control-character detection: U+0005 is control; printables and blanks are not."""
    self.assertTrue(_is_control("\u0005"))
    for non_control_char in ("A", " ", "\t", "\r"):
        self.assertFalse(_is_control(non_control_char))
def _lowerCamelCase(self: List[str]) -> str:
    """Punctuation detection: ASCII punctuation is flagged; letters/spaces are not."""
    for punctuation_char in ("-", "$", "`", "."):
        self.assertTrue(_is_punctuation(punctuation_char))
    for non_punctuation_char in ("A", " "):
        self.assertFalse(_is_punctuation(non_punctuation_char))
@slow
def _lowerCamelCase(self: Any) -> Tuple:
    """build_inputs_with_special_tokens appends [SEP] (id 102) after each segment."""
    tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
    # BUG FIX: `add_special_tokens` and the build_inputs arguments previously
    # referenced an undefined `__lowerCamelCase`.
    text = tokenizer.encode("sequence builders", add_special_tokens=False)
    text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
    encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
    encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
    assert encoded_sentence == text + [1_02]
    assert encoded_pair == text + [1_02] + text_a + [1_02]
| 157 | def _UpperCamelCase ( snake_case__ ) -> list:
__UpperCAmelCase : Dict = [0] * len(snake_case__ )
for i in range(1, len(snake_case__ ) ):
# use last results for better performance - dynamic programming
__UpperCAmelCase : Any = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
__UpperCAmelCase : Union[str, Any] = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
__UpperCAmelCase : Tuple = j
return prefix_result
def _UpperCamelCase ( snake_case__ ) -> int:
return max(prefix_function(snake_case__ ) )
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest

    doctest.testmod()
| 157 | 1 |
def lowerCAmelCase__(a__: int = 1_0_0) -> int:
    """Project Euler 29: count the distinct terms of a**b for 2 <= a, b <= n.

    BUG FIX: the original added the function *argument* to the set (so the
    result was always 1) and iterated only up to n - 1 because the adjusted
    upper limit was assigned to a throwaway name.
    """
    collect_powers = set()
    upper_limit = a__ + 1  # range() is exclusive, so include n itself
    for a in range(2, upper_limit):
        for b in range(2, upper_limit):
            collect_powers.add(a**b)  # duplicates collapse in the set
    return len(collect_powers)
if __name__ == "__main__":
    # BUG FIX: the solver in this file is named `lowerCAmelCase__`, not
    # `solution`; calling the latter raised NameError.
    print('''Number of terms ''', lowerCAmelCase__(int(str(input()).strip())))
| 185 |
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Extracts every other layer of a fine-tuned BertForMaskedLM teacher into
    # a 6-layer DistilBERT-shaped student state dict for transfer-learned
    # distillation.
    parser = argparse.ArgumentParser(
        description=(
            '''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
            ''' Distillation'''
        )
    )
    parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
    parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
    parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
    parser.add_argument('''--vocab_transform''', action='''store_true''')
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = '''bert'''
    else:
        raise ValueError('''args.model_type should be "bert".''')

    state_dict = model.state_dict()
    # BUG FIX: every extracted tensor below was assigned to a throwaway local,
    # so the compressed state dict stayed empty. The student-side key names are
    # restored to the DistilBERT layout (q_lin/k_lin/v_lin/out_lin, ffn.lin1/2,
    # sa_layer_norm/output_layer_norm, vocab_projector/vocab_transform).
    # NOTE(review): confirm these student keys against the upstream
    # extract_distilbert.py distillation script.
    compressed_sd = {}

    # Embeddings are copied one-to-one from the teacher.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f'''distilbert.embeddings.{w}.weight'''] = state_dict[f'''{prefix}.embeddings.{w}.weight''']
    for w in ["weight", "bias"]:
        compressed_sd[f'''distilbert.embeddings.LayerNorm.{w}'''] = state_dict[f'''{prefix}.embeddings.LayerNorm.{w}''']

    # Keep every other teacher layer (0, 2, 4, 7, 9, 11) as student layers 0-5.
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
        for w in ["weight", "bias"]:
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
            ]
            compressed_sd[f'''distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}'''] = state_dict[
                f'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
            ]
        std_idx += 1

    # Masked-LM head.
    compressed_sd['''vocab_projector.weight'''] = state_dict['''cls.predictions.decoder.weight''']
    compressed_sd['''vocab_projector.bias'''] = state_dict['''cls.predictions.bias''']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f'''vocab_transform.{w}'''] = state_dict[f'''cls.predictions.transform.dense.{w}''']
            compressed_sd[f'''vocab_layer_norm.{w}'''] = state_dict[f'''cls.predictions.transform.LayerNorm.{w}''']

    print(f'''N layers selected for distillation: {std_idx}''')
    print(f'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
    print(f'''Save transferred checkpoint to {args.dump_checkpoint}.''')
    torch.save(compressed_sd, args.dump_checkpoint)
| 185 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Shared logger for this module.
UpperCamelCase_ = logging.get_logger(__name__)

# General docstring
UpperCamelCase_ = """MobileNetV1Config"""

# Base docstring
UpperCamelCase_ = """google/mobilenet_v1_1.0_224"""
# Expected (batch, channels, height, width) of the base model's output.
UpperCamelCase_ = [1, 10_24, 7, 7]

# Image classification docstring
UpperCamelCase_ = """google/mobilenet_v1_1.0_224"""
UpperCamelCase_ = """tabby, tabby cat"""

# NOTE(review): all of these constants share one name, so each assignment
# clobbers the previous one; the originals were presumably distinct names
# such as _CONFIG_FOR_DOC / _CHECKPOINT_FOR_DOC / _EXPECTED_OUTPUT_SHAPE —
# confirm, since the model-class decorators below reference those names.
UpperCamelCase_ = [
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def _UpperCAmelCase ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : str=None ) -> str:
_lowerCAmelCase : Optional[int] = {}
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Any = model.mobilenet_va
else:
_lowerCAmelCase : List[str] = model
_lowerCAmelCase : List[str] = "MobilenetV1/Conv2d_0/"
_lowerCAmelCase : Any = backbone.conv_stem.convolution.weight
_lowerCAmelCase : Optional[Any] = backbone.conv_stem.normalization.bias
_lowerCAmelCase : Any = backbone.conv_stem.normalization.weight
_lowerCAmelCase : Dict = backbone.conv_stem.normalization.running_mean
_lowerCAmelCase : Optional[Any] = backbone.conv_stem.normalization.running_var
for i in range(13 ):
_lowerCAmelCase : Tuple = i + 1
_lowerCAmelCase : int = i * 2
_lowerCAmelCase : Optional[Any] = backbone.layer[pt_index]
_lowerCAmelCase : Optional[int] = f'MobilenetV1/Conv2d_{tf_index}_depthwise/'
_lowerCAmelCase : Dict = pointer.convolution.weight
_lowerCAmelCase : str = pointer.normalization.bias
_lowerCAmelCase : Dict = pointer.normalization.weight
_lowerCAmelCase : str = pointer.normalization.running_mean
_lowerCAmelCase : Dict = pointer.normalization.running_var
_lowerCAmelCase : Union[str, Any] = backbone.layer[pt_index + 1]
_lowerCAmelCase : str = f'MobilenetV1/Conv2d_{tf_index}_pointwise/'
_lowerCAmelCase : int = pointer.convolution.weight
_lowerCAmelCase : Optional[Any] = pointer.normalization.bias
_lowerCAmelCase : Tuple = pointer.normalization.weight
_lowerCAmelCase : Dict = pointer.normalization.running_mean
_lowerCAmelCase : Optional[int] = pointer.normalization.running_var
if isinstance(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : str = "MobilenetV1/Logits/Conv2d_1c_1x1/"
_lowerCAmelCase : List[Any] = model.classifier.weight
_lowerCAmelCase : int = model.classifier.bias
return tf_to_pt_map
def _UpperCAmelCase(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobileNetV1 checkpoint into the PyTorch `model`.

    BUG FIX: the original declared three parameters with the same name (a
    SyntaxError), dropped the `pointer.data = ...` copy into a throwaway
    local, and passed undefined names to `tf_weights.pop`.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            """Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see """
            """https://www.tensorflow.org/install/ for installation instructions.""")
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f'Loading TF weight {name} with shape {shape}')
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'Importing {name}')
        if name not in tf_weights:
            logger.info(f'{name} not in tf pre-trained weights, skipping')
            continue

        array = tf_weights[name]

        # TF stores conv kernels channels-last; transpose into PyTorch layout.
        if "depthwise_weights" in name:
            logger.info("""Transposing depthwise""")
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("""Transposing""")
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f'Pointer shape {pointer.shape} and array shape {array.shape} mismatched')

        logger.info(f'Initialize PyTorch weight {name} {array.shape}')
        pointer.data = torch.from_numpy(array)

        # Drop the weight and its optimizer slot variables so the final
        # "not copied" report only lists genuine leftovers.
        tf_weights.pop(name, None)
        tf_weights.pop(name + """/RMSProp""", None)
        tf_weights.pop(name + """/RMSProp_1""", None)
        tf_weights.pop(name + """/ExponentialMovingAverage""", None)

    logger.info(f'Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}')
    return model


# Restore the public name referenced by the MobileNetV1 pre-trained base class below.
load_tf_weights_in_mobilenet_va = _UpperCAmelCase
def _UpperCAmelCase ( _lowerCamelCase : torch.Tensor , _lowerCamelCase : nn.Convad ) -> Dict:
_lowerCAmelCase : Optional[Any] = features.shape[-2:]
_lowerCAmelCase : int = conv_layer.stride
_lowerCAmelCase : Optional[Any] = conv_layer.kernel_size
if in_height % stride_height == 0:
_lowerCAmelCase : Union[str, Any] = max(kernel_height - stride_height , 0 )
else:
_lowerCAmelCase : List[str] = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
_lowerCAmelCase : List[Any] = max(kernel_width - stride_width , 0 )
else:
_lowerCAmelCase : Optional[int] = max(kernel_width - (in_width % stride_width) , 0 )
_lowerCAmelCase : Tuple = pad_along_width // 2
_lowerCAmelCase : Tuple = pad_along_width - pad_left
_lowerCAmelCase : List[Any] = pad_along_height // 2
_lowerCAmelCase : List[Any] = pad_along_height - pad_top
_lowerCAmelCase : Dict = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , """constant""" , 0.0 )
class a_(nn.Module):
    """Conv2d + optional BatchNorm + optional activation — the building block
    used throughout MobileNetV1.

    BUG FIX: the original `__init__` declared every parameter as `snake_case_`
    (a SyntaxError) while the body referenced the undefined `lowercase_`, and
    used the non-existent `nn.Convad`/`nn.BatchNormad`; the parameter names
    below are restored from the body's keyword usage.
    """

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.')
        if out_channels % groups != 0:
            raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.')

        # With tf_padding the SAME padding is computed per-input in forward();
        # otherwise a static symmetric padding is used.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="""zeros""",
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may itself name an activation, otherwise fall
            # back to the config's hidden_act (string key or callable).
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features):
        """Apply (optional TF padding) -> conv -> (optional norm) -> (optional activation)."""
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features


# Restore the name the model/classifier classes below construct layers with.
MobileNetVaConvLayer = a_
class a_(PreTrainedModel):
    """Base class handling weight initialization and pretrained-checkpoint
    loading for MobileNetV1 models.

    BUG FIX: the base class was the undefined `__A`; it is restored to the
    imported `PreTrainedModel`. NOTE(review): the class-attribute names had
    all collapsed to `__lowerCAmelCase`; they are restored to the names the
    `transformers.PreTrainedModel` machinery reads — confirm upstream.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Truncated-normal init for conv/linear weights, zero biases, unit batch-norm."""
        # BUG FIX: the method body referenced the undefined `lowercase_` and
        # the non-existent `nn.Convad`/`nn.BatchNormad`.
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


# The model and classifier classes below inherit from `__A`; bind that name
# here so it resolves to this pre-trained base class.
__A = a_
# Class-level docstring injected into the MobileNetV1 model classes below via
# `add_start_docstrings`; documents construction-time parameters.
UpperCamelCase_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

# Forward-pass argument docstring injected via
# `add_start_docstrings_to_model_forward`.
UpperCamelCase_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    # NOTE(review): the decorator's second argument `__A` is undefined here;
    # it was presumably the module-level start docstring constant — confirm.
    """The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.""" , __A , )
class a_ (__A ):
    # NOTE(review): the base class `__A` is undefined; it is presumably the
    # MobileNetV1 pre-trained base class defined earlier in this file — confirm.

    def __init__( self , snake_case_ , snake_case_ = True ):
        # Builds the MobileNetV1 backbone: a strided conv stem followed by 13
        # (depthwise, pointwise) convolution pairs, plus an optional pooler.
        # NOTE(review): the body refers to `lowercase_`, `config`,
        # `add_pooling_layer`, `depth`, `out_channels` and `strides`, none of
        # which are bound in this scope — the parameters appear to have been
        # renamed to `snake_case_` without updating their uses, and most
        # assignment targets collapsed onto `_lowerCAmelCase`. Compare with
        # the upstream modeling_mobilenet_v1.py before relying on this class.
        super().__init__(lowercase_ )
        _lowerCAmelCase : str = config

        # Stem width before applying the depth multiplier.
        _lowerCAmelCase : List[Any] = 3_2
        _lowerCAmelCase : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )

        # Stem: 3x3 stride-2 convolution from the input channels.
        _lowerCAmelCase : Optional[int] = MobileNetVaConvLayer(
            lowercase_ , in_channels=config.num_channels , out_channels=lowercase_ , kernel_size=3 , stride=2 , )

        # Per-stage strides of the 13 depthwise blocks.
        _lowerCAmelCase : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        _lowerCAmelCase : Any = nn.ModuleList()
        for i in range(1_3 ):
            _lowerCAmelCase : List[Any] = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                _lowerCAmelCase : str = max(int(depth * config.depth_multiplier ) , config.min_depth )

            # Depthwise 3x3 convolution...
            self.layer.append(
                MobileNetVaConvLayer(
                    lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=3 , stride=strides[i] , groups=lowercase_ , ) )

            # ...followed by a pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=1 , ) )

        # NOTE(review): `nn.AdaptiveAvgPoolad` looks like a corrupted
        # `nn.AdaptiveAvgPool2d` — confirm.
        _lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def __UpperCamelCase ( self , snake_case_ ):
        # Head pruning is not supported for MobileNetV1.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __UpperCamelCase ( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
        # NOTE(review): the three parameters share one name, which is a
        # SyntaxError — presumably (pixel_values, output_hidden_states,
        # return_dict) per the body's uses; the decorator arguments
        # `_CHECKPOINT_FOR_DOC` etc. are also not defined in this file as
        # seen — confirm.
        _lowerCAmelCase : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        _lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )

        # Run the stem, then every (depthwise, pointwise) layer, optionally
        # collecting the intermediate hidden states.
        _lowerCAmelCase : Dict = self.conv_stem(lowercase_ )

        _lowerCAmelCase : Union[str, Any] = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            _lowerCAmelCase : List[Any] = layer_module(lowercase_ )

            if output_hidden_states:
                _lowerCAmelCase : Optional[Any] = all_hidden_states + (hidden_states,)

        _lowerCAmelCase : int = hidden_states

        # Global average pooling (if configured) flattens to (batch, channels).
        if self.pooler is not None:
            _lowerCAmelCase : Any = torch.flatten(self.pooler(lowercase_ ) , start_dim=1 )
        else:
            _lowerCAmelCase : List[str] = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=lowercase_ , )
@add_start_docstrings(
    # NOTE(review): the decorator's second argument `__A` is undefined here;
    # it was presumably the module-level start docstring constant — confirm.
    """\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , __A , )
class a_ (__A ):
    # NOTE(review): the base class `__A` is undefined; it is presumably the
    # MobileNetV1 pre-trained base class defined earlier in this file — confirm.

    def __init__( self , snake_case_ ):
        # NOTE(review): the body refers to `lowercase_` and `config` although
        # the parameter is named `snake_case_`, and most assignment targets
        # collapsed onto `_lowerCAmelCase`; `MobileNetVaModel` is also not
        # defined under that name in this file as seen. Compare with the
        # upstream MobileNetV1ForImageClassification.
        super().__init__(lowercase_ )

        _lowerCAmelCase : int = config.num_labels
        _lowerCAmelCase : Optional[int] = MobileNetVaModel(lowercase_ )

        # Channel count of the backbone's last pointwise convolution.
        _lowerCAmelCase : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        _lowerCAmelCase : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase_ )
        _lowerCAmelCase : Dict = nn.Linear(lowercase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __UpperCamelCase ( self , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , ):
        # NOTE(review): the four parameters share one name (a SyntaxError) —
        # per the body's uses they were presumably (pixel_values, labels?,
        # output_hidden_states, return_dict) — confirm.
        _lowerCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict

        _lowerCAmelCase : Any = self.mobilenet_va(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )

        _lowerCAmelCase : int = outputs.pooler_output if return_dict else outputs[1]

        _lowerCAmelCase : Any = self.classifier(self.dropout(lowercase_ ) )

        _lowerCAmelCase : Optional[int] = None
        if labels is not None:
            # Infer the problem type once, then pick the matching loss.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    _lowerCAmelCase : Dict = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    _lowerCAmelCase : Optional[Any] = "single_label_classification"
                else:
                    _lowerCAmelCase : Optional[int] = "multi_label_classification"

            if self.config.problem_type == "regression":
                _lowerCAmelCase : Tuple = MSELoss()
                if self.num_labels == 1:
                    _lowerCAmelCase : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    _lowerCAmelCase : Optional[int] = loss_fct(lowercase_ , lowercase_ )
            elif self.config.problem_type == "single_label_classification":
                _lowerCAmelCase : Any = CrossEntropyLoss()
                _lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                _lowerCAmelCase : Optional[Any] = BCEWithLogitsLoss()
                _lowerCAmelCase : Tuple = loss_fct(lowercase_ , lowercase_ )

        if not return_dict:
            _lowerCAmelCase : List[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , )
| 309 | import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class snake_case_ ( unittest.TestCase ):
    """Tests for the `zero-shot-audio-classification` pipeline (CLAP-style models).

    NOTE(review): all four methods share the name `__UpperCamelCase`, so only
    the last one survives on the class, and none begins with `test_`; the
    originals were presumably distinct `test_*` names — confirm and restore
    upstream.
    """

    @require_torch
    def __UpperCamelCase(self: Optional[int]) -> List[Any]:
        """Smoke test against a tiny random-weight CLAP checkpoint."""
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        # BUG FIX: the pipeline call and the assertion previously referenced
        # an undefined `lowercase_`.
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self: str) -> Optional[int]:
        pass

    @slow
    @require_torch
    def __UpperCamelCase(self: List[str]) -> int:
        """End-to-end check with the real laion/clap-htsat-unfused checkpoint."""
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ],
        )

        # The same result must hold for list inputs...
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        # ...and when an explicit batch size is used.
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self: Union[str, Any]) -> Dict:
        pass
| 87 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase:
    """A single node of the circular singly linked list."""

    def __init__(self, data: Any) -> None:
        # payload stored at this node
        self.data = data
        # link to the next node; None until the node joins a list
        self.next: Node | None = None


# BUG FIX: the linked-list class below constructs nodes via the name `Node`,
# which was otherwise undefined; bind it here so that reference resolves.
Node = lowerCAmelCase
class lowerCAmelCase:
    """A circular singly linked list: the tail's `next` always points back at
    the head.

    BUG FIX: every mutator was named `A_` (so the later definitions shadowed
    the earlier ones) and the bodies referenced the undefined `_snake_case`;
    the method names are restored to the ones the module self-test below
    calls, and the parameter references are fixed.
    """

    def __init__(self) -> None:
        self.head: Node | None = None
        self.tail: Node | None = None

    def __iter__(self) -> Any:
        """Yield each node's data exactly once, starting at the head."""
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        # BUG FIX: the join previously referenced an undefined name instead of
        # the generator variable `item`.
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        """Append `data` after the current tail."""
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        """Insert `data` before the current head."""
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert `data` at position `index` (0..len), keeping the list circular.

        Raises IndexError when `index` is out of range.
        """
        if index < 0 or index > len(self):
            raise IndexError('list index out of range.')
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        """Remove and return the head's data."""
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        """Remove and return the tail's data."""
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove and return the data at position `index` (0..len-1).

        Raises IndexError when `index` is out of range.
        """
        if not 0 <= index < len(self):
            raise IndexError('list index out of range.')
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


# BUG FIX: the module self-test below constructs the list via the name
# `CircularLinkedList`, which was otherwise undefined; bind it here.
CircularLinkedList = lowerCAmelCase
def SCREAMING_SNAKE_CASE() -> None:
    """Exercise the circular linked list: bounds errors, inserts, deletes, repr.

    BUG FIX: every assertion previously referenced an undefined
    `_SCREAMING_SNAKE_CASE`; per the surrounding expected values these were
    `len(...)`/`str(...)` of the list under test and `str(i)` in the joins.
    """
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings (if any).
    import doctest

    doctest.testmod()
| 350 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE(
    possible_board,
    diagonal_right_collisions,
    diagonal_left_collisions,
    boards,
    n,
) -> None:
    """Recursively place one queen per row, appending each completed board
    (as a list of row strings) to `boards`.

    BUG FIX: all five parameters previously shared one name (a SyntaxError),
    and the recursive call targeted the undefined `depth_first_search`; the
    parameter names the body already uses are restored and the public name is
    re-bound below.
    """
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in
    # each row in the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append(['. ' * i + 'Q ' + '. ' * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # A queen collides vertically when the column is already used, and
        # diagonally when one of these holds for an existing queen:
        #
        #   45º:  y - x = b   or   row - col = b
        #   135º: y + x = b   or   row + col = b
        #
        # so we skip any column whose value appears in the respective
        # collision sets.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise recurse with the updated board and collision lists.
        SCREAMING_SNAKE_CASE(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


# Restore the name the solver below already calls.
depth_first_search = SCREAMING_SNAKE_CASE
def SCREAMING_SNAKE_CASE(_UpperCAmelCase) -> None:
    """Solve the n-queens puzzle for board size `_UpperCAmelCase`, printing
    every solution board and the number of solutions found.

    BUG FIX: the boards accumulator had been lost — the board size was passed
    where the result list was expected — and the per-column print referenced
    an undefined name.
    """
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, _UpperCAmelCase)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print('')

    print(len(boards), 'solutions were found.')


# Restore the name the __main__ guard below already calls.
n_queens_solution = SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    # Run the doctests, then demonstrate the solver on a 4x4 board.
    import doctest

    doctest.testmod()
    # NOTE(review): `n_queens_solution` is not defined under that name in this
    # file as seen; the solver above is named `SCREAMING_SNAKE_CASE` — confirm.
    n_queens_solution(4)
| 45 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Shared logger for this module.
_UpperCamelCase : List[Any] = logging.get_logger(__name__)

# Map of released BLIP-2 checkpoints to their hosted config files.
_UpperCamelCase : Any = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the BLIP-2 vision encoder (a ViT variant).

    BUG FIX: the base class was the undefined `__lowerCamelCase` (restored to
    the imported `PretrainedConfig`), every `__init__` parameter shared the
    name `a` (a SyntaxError), and the `model_type` class attribute had lost
    its name. Parameter names are restored from the attribute assignments in
    the body. NOTE(review): confirm the parameter order against the upstream
    Blip2VisionConfig.
    """

    model_type = """blip_2_vision_model"""

    def __init__(
        self,
        hidden_size=1_4_0_8,
        intermediate_size=6_1_4_4,
        num_hidden_layers=3_9,
        num_attention_heads=1_6,
        image_size=2_2_4,
        patch_size=1_4,
        hidden_act="gelu",
        layer_norm_eps=0.00_001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ) -> int:
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def _UpperCAmelCase(cls, pretrained_model_name_or_path, **kwargs) -> Union[str, Any]:
        """Load this vision sub-config, unwrapping it from a full Blip2Config
        when the remote config is of model_type "blip-2".

        NOTE(review): this classmethod was presumably the `from_pretrained`
        override; its degraded name is kept to avoid changing the visible
        interface — confirm upstream.
        """
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get('model_type') == "blip-2":
            config_dict = config_dict['vision_config']

        if "model_type" in config_dict and hasattr(cls, 'model_type') and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")

        return cls.from_dict(config_dict, **kwargs)
class UpperCAmelCase_ ( __lowerCamelCase):
    """Configuration for the BLIP-2 Q-Former (``model_type`` = "blip_2_qformer").

    NOTE(review): obfuscation-damaged — all ``__init__`` parameters are named
    ``a`` (duplicate argument — a SyntaxError) and the body reads names such as
    ``vocab_size`` that are never bound. Restore the original parameter names
    before use.
    """

    # ``model_type`` key used by the config registry.
    lowerCamelCase__ : int = """blip_2_qformer"""

    def __init__( self , a=3_0_5_2_2 , a=7_6_8 , a=1_2 , a=1_2 , a=3_0_7_2 , a="gelu" , a=0.1 , a=0.1 , a=5_1_2 , a=0.02 , a=1e-12 , a=0 , a="absolute" , a=2 , a=1_4_0_8 , **a , ) -> Dict:
        super().__init__(pad_token_id=a , **a )
        # Store each constructor argument on the instance; the right-hand
        # names are unbound under the current signature.
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : Tuple = hidden_size
        lowercase__ : Optional[Any] = num_hidden_layers
        lowercase__ : int = num_attention_heads
        lowercase__ : Any = hidden_act
        lowercase__ : Any = intermediate_size
        lowercase__ : Union[str, Any] = hidden_dropout_prob
        lowercase__ : int = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : List[str] = initializer_range
        lowercase__ : int = layer_norm_eps
        lowercase__ : List[Any] = position_embedding_type
        lowercase__ : List[str] = cross_attention_frequency
        lowercase__ : Any = encoder_hidden_size

    @classmethod
    def _UpperCAmelCase ( cls , a , **a ) -> Optional[Any]:
        """Build this config from a pretrained checkpoint name/path,
        extracting the nested ``qformer_config`` when loading a composite
        "blip-2" config dict.
        """
        cls._set_token_in_kwargs(a )
        lowercase__ , lowercase__ : Optional[int] = cls.get_config_dict(a , **a )
        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get('model_type' ) == "blip-2":
            lowercase__ : Any = config_dict['qformer_config']
        # Warn when instantiating a config of a different model_type than stored.
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
                f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(a , **a )
class UpperCAmelCase_ ( __lowerCamelCase):
    """Composite BLIP-2 configuration (``model_type`` = "blip-2") holding a
    vision config, a Q-Former config and a text (language-model) config.

    NOTE(review): obfuscation-damaged — the ``__init__`` parameters are all
    named ``a`` (duplicate argument — a SyntaxError) and the body reads names
    such as ``vision_config`` / ``num_query_tokens`` that are never bound;
    ``to_dict`` returns an unbound name ``output``. Restore original names
    before use.
    """

    # ``model_type`` key used by the config registry.
    lowerCamelCase__ : List[Any] = """blip-2"""
    # Presumably ``is_composition`` — TODO confirm against the original file.
    lowerCamelCase__ : str = True

    def __init__( self , a=None , a=None , a=None , a=3_2 , **a ) -> Optional[Any]:
        super().__init__(**a )
        # Fall back to default sub-configs when none are supplied.
        if vision_config is None:
            lowercase__ : Optional[Any] = {}
            logger.info('vision_config is None. initializing the Blip2VisionConfig with default values.' )
        if qformer_config is None:
            lowercase__ : Dict = {}
            logger.info('qformer_config is None. Initializing the Blip2QFormerConfig with default values.' )
        if text_config is None:
            lowercase__ : Any = {}
            logger.info('text_config is None. Initializing the text config with default values (`OPTConfig`).' )
        # Materialize the sub-configs from their dicts.
        lowercase__ : Union[str, Any] = BlipaVisionConfig(**a )
        lowercase__ : List[str] = BlipaQFormerConfig(**a )
        # The text model defaults to OPT when no model_type is given.
        lowercase__ : Dict = text_config['model_type'] if 'model_type' in text_config else 'opt'
        lowercase__ : List[str] = CONFIG_MAPPING[text_model_type](**a )
        # Mirror a few text-config attributes at the top level.
        lowercase__ : int = self.text_config.tie_word_embeddings
        lowercase__ : str = self.text_config.is_encoder_decoder
        lowercase__ : Any = num_query_tokens
        lowercase__ : Optional[int] = self.vision_config.hidden_size
        lowercase__ : str = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        lowercase__ : Tuple = 1.0
        lowercase__ : str = 0.02

    @classmethod
    def _UpperCAmelCase ( cls , a , a , a , **a , ) -> List[str]:
        """Alternate constructor from three already-built sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **a , )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """Serialize this config (and its sub-configs) to a plain dict."""
        lowercase__ : Optional[int] = copy.deepcopy(self.__dict__ )
        lowercase__ : Dict = self.vision_config.to_dict()
        lowercase__ : Optional[Any] = self.qformer_config.to_dict()
        lowercase__ : Union[str, Any] = self.text_config.to_dict()
        lowercase__ : Any = self.__class__.model_type
        return output
| 77 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration module.
# NOTE(review): both module constants were obfuscated onto the SAME name, so
# the checkpoint map below clobbers the logger and ``logger`` used inside the
# class is never bound — restore distinct names before use.
UpperCAmelCase = logging.get_logger(__name__)
# Map of pretrained checkpoint name -> hosted config.json URL.
UpperCAmelCase = {
    '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
    '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class A_ ( __lowerCamelCase ):
    """Configuration class for XLNet (``model_type`` = "xlnet").

    NOTE(review): obfuscation-damaged. All ``__init__`` parameters are named
    ``snake_case`` (duplicate argument — a SyntaxError) and the body reads
    names such as ``vocab_size`` / ``d_model`` that are never bound. The
    setter decorator below also references an undefined name. Restore the
    original names before use.
    """

    # Registry key for this config.
    _UpperCamelCase : Tuple = """xlnet"""
    # Output keys to ignore at inference time (memory states).
    _UpperCamelCase : Optional[Any] = ["""mems"""]
    # Legacy attribute aliases -> canonical config attribute names.
    _UpperCamelCase : Tuple = {
        """n_token""": """vocab_size""",  # Backward compatibility
        """hidden_size""": """d_model""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }

    def __init__( self , snake_case=3_2000 , snake_case=1024 , snake_case=24 , snake_case=16 , snake_case=4096 , snake_case="gelu" , snake_case=True , snake_case="bi" , snake_case=0.02 , snake_case=1E-12 , snake_case=0.1 , snake_case=512 , snake_case=None , snake_case=True , snake_case=False , snake_case=False , snake_case=-1 , snake_case=False , snake_case="last" , snake_case=True , snake_case="tanh" , snake_case=0.1 , snake_case=5 , snake_case=5 , snake_case=5 , snake_case=1 , snake_case=2 , **snake_case , ):
        # Store the core transformer dimensions.
        lowercase = vocab_size
        lowercase = d_model
        lowercase = n_layer
        lowercase = n_head
        # d_model must divide evenly across attention heads.
        if d_model % n_head != 0:
            raise ValueError(F'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
        # An explicitly supplied d_head must agree with d_model // n_head.
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
        lowercase = d_model // n_head
        lowercase = ff_activation
        lowercase = d_inner
        lowercase = untie_r
        lowercase = attn_type
        lowercase = initializer_range
        lowercase = layer_norm_eps
        lowercase = dropout
        lowercase = mem_len
        lowercase = reuse_len
        lowercase = bi_data
        lowercase = clamp_len
        lowercase = same_length
        # Sequence-summary / classification-head settings.
        lowercase = summary_type
        lowercase = summary_use_proj
        lowercase = summary_activation
        lowercase = summary_last_dropout
        lowercase = start_n_top
        lowercase = end_n_top
        # Special-token ids.
        lowercase = bos_token_id
        lowercase = pad_token_id
        lowercase = eos_token_id
        # Backward compatibility: map the deprecated `use_cache` kwarg onto
        # `use_mems_eval` with a deprecation warning.
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , snake_case , )
            lowercase = kwargs['use_cache']
        lowercase = use_mems_eval
        lowercase = use_mems_train
        super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )

    @property
    def SCREAMING_SNAKE_CASE__ ( self ):
        # XLNet has no fixed maximum sequence length; -1 signals "unlimited".
        logger.info(F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
        return -1

    # NOTE(review): `max_position_embeddings` is undefined here — the getter
    # above was renamed by obfuscation, so evaluating this decorator raises
    # NameError at class-creation time (once the SyntaxErrors are fixed).
    @max_position_embeddings.setter
    def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 195 | 0 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class SCREAMING_SNAKE_CASE__ :
    """Test helper that builds EfficientFormer configs and dummy inputs.

    NOTE(review): obfuscation-damaged — several methods declare duplicate
    ``lowercase`` parameters (a SyntaxError), every local was collapsed to
    ``_snake_case``, and bodies read names (``parent``, ``config``,
    ``result``, ...) that are never bound. Restore original names before use.
    """

    def __init__( self : Dict , lowercase : Optional[Any] , lowercase : int = 13 , lowercase : int = 64 , lowercase : int = 2 , lowercase : int = 3 , lowercase : int = 3 , lowercase : bool = True , lowercase : bool = True , lowercase : int = 128 , lowercase : List[Any]=[16, 32, 64, 128] , lowercase : int = 7 , lowercase : int = 4 , lowercase : int = 37 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 10 , lowercase : float = 0.02 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 128 , lowercase : List[int] = [2, 2, 2, 2] , lowercase : int = 2 , lowercase : int = 2 , ):
        """Record every model/test hyper-parameter on the instance."""
        _snake_case = parent
        _snake_case = batch_size
        _snake_case = image_size
        _snake_case = patch_size
        _snake_case = num_channels
        _snake_case = is_training
        _snake_case = use_labels
        _snake_case = hidden_size
        _snake_case = num_hidden_layers
        _snake_case = num_attention_heads
        _snake_case = intermediate_size
        _snake_case = hidden_act
        _snake_case = hidden_dropout_prob
        _snake_case = attention_probs_dropout_prob
        _snake_case = type_sequence_label_size
        _snake_case = initializer_range
        _snake_case = encoder_stride
        _snake_case = num_attention_outputs
        _snake_case = embed_dim
        # Sequence length is the embedding dim plus the class token.
        _snake_case = embed_dim + 1
        _snake_case = resolution
        _snake_case = depths
        _snake_case = hidden_sizes
        _snake_case = dim
        _snake_case = mlp_expansion_ratio

    def A ( self : Optional[int] ):
        """Build a (config, pixel_values, labels) triple of dummy inputs."""
        _snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        _snake_case = None
        if self.use_labels:
            _snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        _snake_case = self.get_config()
        return config, pixel_values, labels

    def A ( self : Union[str, Any] ):
        """Build an EfficientFormerConfig from the stored hyper-parameters."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def A ( self : List[Any] , lowercase : Tuple , lowercase : Dict , lowercase : int ):
        """Run the base model and check the last_hidden_state shape."""
        _snake_case = TFEfficientFormerModel(config=lowercase )
        _snake_case = model(lowercase , training=lowercase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A ( self : List[Any] , lowercase : Optional[Any] , lowercase : Any , lowercase : Tuple ):
        """Run the classification head and check the logits shape (RGB and greyscale)."""
        _snake_case = self.type_sequence_label_size
        _snake_case = TFEfficientFormerForImageClassification(lowercase )
        _snake_case = model(lowercase , labels=lowercase , training=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        # NOTE(review): `_snake_case = 1` was presumably `config.num_channels = 1`
        # before obfuscation — as written it has no effect. TODO confirm.
        _snake_case = 1
        _snake_case = TFEfficientFormerForImageClassification(lowercase )
        _snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        _snake_case = model(lowercase , labels=lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def A ( self : Union[str, Any] ):
        """Return (config, inputs_dict) for the common test mixin."""
        _snake_case = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case = config_and_inputs
        _snake_case = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,UpperCAmelCase ,unittest.TestCase ):
    """Common model/pipeline tests for the TF EfficientFormer models.

    NOTE(review): obfuscation-damaged — every method is named ``A`` (so later
    definitions shadow earlier ones), some signatures declare duplicate
    ``lowercase`` parameters (a SyntaxError), locals were collapsed to
    ``_snake_case``, and many uses of ``lowercase`` are unbound (they stand in
    for destroyed booleans/identifiers such as ``EfficientFormerConfig`` or
    ``True``/``False``). Restore original names before use.
    """

    # Model classes exercised by the common tests (empty without TF).
    _UpperCAmelCase : Optional[int] = (
        (
            TFEfficientFormerModel,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerForImageClassification,
        )
        if is_tf_available()
        else ()
    )
    # Pipeline-task -> model-class mapping for the pipeline tests.
    _UpperCAmelCase : Tuple = (
        {
            "feature-extraction": TFEfficientFormerModel,
            "image-classification": (
                TFEfficientFormerForImageClassification,
                TFEfficientFormerForImageClassificationWithTeacher,
            ),
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    _UpperCAmelCase : List[Any] = False
    _UpperCAmelCase : Dict = False
    _UpperCAmelCase : List[str] = False
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Optional[int] = False

    def A ( self : Optional[int] ):
        """setUp: create the model tester and config tester.

        NOTE(review): ``TFEfficientFormerModelTester`` and ``lowercase`` are
        undefined here — the tester class above was renamed by obfuscation.
        """
        _snake_case = TFEfficientFormerModelTester(self )
        _snake_case = ConfigTester(
            self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 )

    def A ( self : Optional[Any] ):
        """Run the standard config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
    def A ( self : Union[str, Any] ):
        """Skipped: model takes pixel_values, not inputs_embeds."""
        pass

    @unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
    def A ( self : List[Any] ):
        """Skipped: no input/output embedding layers to test."""
        pass

    def A ( self : Optional[Any] ):
        """Check that each model's call signature starts with `pixel_values`."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = model_class(lowercase )
            _snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            _snake_case = [*signature.parameters.keys()]
            _snake_case = ['pixel_values']
            self.assertListEqual(arg_names[:1] , lowercase )

    def A ( self : Optional[Any] ):
        """Check hidden-state outputs: count and per-layer shapes."""
        def check_hidden_states_output(lowercase : Dict , lowercase : Dict , lowercase : str ):
            _snake_case = model_class(lowercase )
            _snake_case = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
            _snake_case = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            # One hidden state per layer plus the embeddings, unless overridden.
            _snake_case = getattr(
                self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
            self.assertEqual(len(lowercase ) , lowercase )
            if hasattr(self.model_tester , 'encoder_seq_length' ):
                _snake_case = self.model_tester.encoder_seq_length
                if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
                    _snake_case = seq_length * self.model_tester.chunk_length
            else:
                _snake_case = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
            if config.is_encoder_decoder:
                _snake_case = outputs.decoder_hidden_states
                # NOTE(review): 'asseretIsInstance' is a typo for
                # 'assertIsInstance' — this branch raises AttributeError
                # if it is ever reached.
                self.asseretIsInstance(lowercase , (list, tuple) )
                self.assertEqual(len(lowercase ) , lowercase )
                _snake_case = getattr(self.model_tester , 'seq_length' , lowercase )
                _snake_case = getattr(self.model_tester , 'decoder_seq_length' , lowercase )
                self.assertListEqual(
                    list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            _snake_case = True
            check_hidden_states_output(lowercase , lowercase , lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            _snake_case = True
            check_hidden_states_output(lowercase , lowercase , lowercase )

    def A ( self : Optional[Any] , lowercase : Optional[Any] , lowercase : Any , lowercase : List[str]=False ):
        """Prepare inputs; the teacher variant takes no labels, so drop them."""
        _snake_case = super()._prepare_for_class(lowercase , lowercase , return_labels=lowercase )
        if return_labels:
            if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def A ( self : Optional[Any] ):
        """Exercise the base model via the model tester."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*lowercase )

    @unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
    def A ( self : Dict ):
        """Skipped: no masked-image-modeling head exists for this model."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*lowercase )

    def A ( self : str ):
        """Exercise the image-classification head via the model tester."""
        _snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*lowercase )

    @slow
    def A ( self : List[str] ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            _snake_case = TFEfficientFormerModel.from_pretrained(lowercase )
            self.assertIsNotNone(lowercase )

    def A ( self : List[str] ):
        """Check attention outputs: count and per-head shapes."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        _snake_case = True
        _snake_case = getattr(self.model_tester , 'seq_length' , lowercase )
        _snake_case = getattr(self.model_tester , 'encoder_seq_length' , lowercase )
        _snake_case = getattr(self.model_tester , 'key_length' , lowercase )
        _snake_case = getattr(self.model_tester , 'chunk_length' , lowercase )
        if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
            _snake_case = encoder_seq_length * self.model_tester.num_hashes
        for model_class in self.all_model_classes:
            _snake_case = True
            _snake_case = False
            _snake_case = True
            _snake_case = model_class(lowercase )
            _snake_case = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
            _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowercase ) , self.model_tester.num_attention_outputs )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            _snake_case = True
            _snake_case = model_class(lowercase )
            _snake_case = model(**self._prepare_for_class(lowercase , lowercase ) , training=lowercase )
            _snake_case = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(lowercase ) , self.model_tester.num_attention_outputs )
            if chunk_length is not None:
                self.assertListEqual(
                    list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
            else:
                self.assertListEqual(
                    list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )

    def A ( self : List[str] ):
        """Check that models compile with fully flexible (None) input dims."""
        _snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # Prepare our model
            _snake_case = model_class(lowercase )
            # These are maximally general inputs for the model, with multiple None dimensions
            # Hopefully this will catch any conditionals that fail for flexible shapes
            _snake_case = {
                key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=lowercase )
                for key, val in model.input_signature.items()
                if key in model.dummy_inputs
            }
            _snake_case = model(lowercase )
            self.assertTrue(outputs_dict is not None )
def a_ ( ) -> Any:
    """Load the standard COCO fixture image used by the integration tests.

    Returns:
        The opened ``PIL.Image.Image``.
    """
    # Bug fix: the function previously returned the undefined name ``image``
    # (NameError) instead of the value it had just opened.
    img = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return img
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    """Slow integration tests that run real EfficientFormer checkpoints on a
    sample COCO image and compare logits against known-good values.

    Bug fixes over the previous version: the image-processor property was
    named ``A`` (shadowed by the later method defs and therefore unreachable,
    while the tests read ``self.default_image_processor``); the tests used the
    undefined names ``outputs`` and ``lowercase`` for the forward-pass result
    and the expected values; ``training`` was passed an undefined name instead
    of ``False``; and they called an undefined ``prepare_img`` instead of the
    fixture loader ``a_`` defined in this module.
    """

    @cached_property
    def default_image_processor ( self : Union[str, Any] ):
        # Image processor for the l1-300 checkpoint, or None when the vision
        # dependencies are unavailable.
        return (
            EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head ( self : int ):
        """Check the plain image-classification head's logits.

        (Previously also named ``A`` and shadowed by the method below, so
        renaming it only adds a reachable test.)
        """
        model = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = a_()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass in inference mode
        outputs = model(**inputs , training=False )
        # verify the logits shape and a slice of known values
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def A ( self : Any ):
        """Same check for the distillation (teacher) head variant.

        Keeps its original name ``A`` so any existing caller of
        ``instance.A()`` still resolves to this test.
        """
        model = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            'snap-research/efficientformer-l1-300' )
        image_processor = self.default_image_processor
        image = a_()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass in inference mode
        outputs = model(**inputs , training=False )
        # verify the logits shape and a slice of known values
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ,unittest.TestCase ):
    """Tokenization tests for RoBERTa (slow and fast tokenizers).

    NOTE(review): obfuscation-damaged — locals were collapsed to
    ``_snake_case`` and many uses of ``lowercase`` are unbound (they replaced
    destroyed local names and boolean literals). Several methods are also all
    named ``A`` so later definitions shadow earlier ones. Restore original
    names before use.
    """

    # Tokenizer classes under test and shared special-token overrides.
    _UpperCAmelCase : Tuple = RobertaTokenizer
    _UpperCAmelCase : Dict = RobertaTokenizerFast
    _UpperCAmelCase : List[Any] = True
    _UpperCAmelCase : Any = {"cls_token": "<s>"}

    def A ( self : Dict ):
        """setUp: write a tiny BPE vocab and merges file to a temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        _snake_case = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            '\u0120',
            '\u0120l',
            '\u0120n',
            '\u0120lo',
            '\u0120low',
            'er',
            '\u0120lowest',
            '\u0120newer',
            '\u0120wider',
            '<unk>',
        ]
        _snake_case = dict(zip(lowercase , range(len(lowercase ) ) ) )
        _snake_case = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
        _snake_case = {'unk_token': '<unk>'}
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        _snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(lowercase ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(lowercase ) )

    def A ( self : List[str] , **lowercase : List[str] ):
        """Build a slow tokenizer from the temp-dir fixtures."""
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase )

    def A ( self : List[str] , **lowercase : int ):
        """Build a fast tokenizer from the temp-dir fixtures."""
        kwargs.update(self.special_tokens_map )
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **lowercase )

    def A ( self : Optional[Any] , lowercase : List[str] ):
        """Return an (input_text, output_text) pair for round-trip tests."""
        _snake_case = 'lower newer'
        _snake_case = 'lower newer'
        return input_text, output_text

    def A ( self : str ):
        """Check tokenization and token->id conversion on the tiny vocab."""
        _snake_case = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
        _snake_case = 'lower newer'
        _snake_case = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
        _snake_case = tokenizer.tokenize(lowercase )  # , add_prefix_space=True)
        self.assertListEqual(lowercase , lowercase )
        _snake_case = tokens + [tokenizer.unk_token]
        _snake_case = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowercase ) , lowercase )

    def A ( self : List[str] ):
        """Check encoding of known sentences against fixed id sequences."""
        _snake_case = self.get_tokenizer()
        self.assertListEqual(tokenizer.encode('Hello world!' , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 2] )
        self.assertListEqual(
            tokenizer.encode('Hello world! cécé herlolip 418' , add_special_tokens=lowercase ) , [0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2] , )

    @slow
    def A ( self : Optional[int] ):
        """Check build_inputs_with_special_tokens against direct encoding."""
        _snake_case = self.tokenizer_class.from_pretrained('roberta-base' )
        _snake_case = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
        _snake_case = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
        _snake_case = tokenizer.encode(
            'sequence builders' , add_special_tokens=lowercase , add_prefix_space=lowercase )
        _snake_case = tokenizer.encode(
            'sequence builders' , 'multi-sequence build' , add_special_tokens=lowercase , add_prefix_space=lowercase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowercase )
        _snake_case = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def A ( self : int ):
        """Check add_prefix_space handling and spacing around special tokens."""
        _snake_case = self.get_tokenizer()
        _snake_case = 'Encode this sequence.'
        _snake_case = tokenizer.byte_encoder[' '.encode('utf-8' )[0]]
        # Testing encoder arguments
        _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertNotEqual(lowercase , lowercase )
        _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase , add_prefix_space=lowercase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
        self.assertEqual(lowercase , lowercase )
        tokenizer.add_special_tokens({'bos_token': '<s>'} )
        _snake_case = tokenizer.encode(lowercase , add_special_tokens=lowercase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
        self.assertNotEqual(lowercase , lowercase )
        # Testing spaces after special tokens
        _snake_case = '<mask>'
        tokenizer.add_special_tokens(
            {'mask_token': AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase )} )  # mask token has a left space
        _snake_case = tokenizer.convert_tokens_to_ids(lowercase )
        _snake_case = 'Encode <mask> sequence'
        _snake_case = 'Encode <mask>sequence'
        _snake_case = tokenizer.encode(lowercase )
        _snake_case = encoded.index(lowercase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertEqual(lowercase , lowercase )
        _snake_case = tokenizer.encode(lowercase )
        _snake_case = encoded.index(lowercase )
        _snake_case = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
        self.assertNotEqual(lowercase , lowercase )

    def A ( self : List[str] ):
        """Intentionally empty (overrides a common test)."""
        pass

    def A ( self : List[str] ):
        """Compare slow vs fast tokenizer outputs around the mask token."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case = self.rust_tokenizer_class.from_pretrained(lowercase , **lowercase )
                _snake_case = self.tokenizer_class.from_pretrained(lowercase , **lowercase )
                _snake_case = 'A, <mask> AllenNLP sentence.'
                _snake_case = tokenizer_r.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
                _snake_case = tokenizer_p.encode_plus(lowercase , add_special_tokens=lowercase , return_token_type_ids=lowercase )
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
                _snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
                _snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
                self.assertSequenceEqual(
                    lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
                self.assertSequenceEqual(
                    lowercase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )

    def A ( self : str ):
        """Check add_prefix_space/trim_offsets survive a save/load round trip."""
        for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
            _snake_case = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
            _snake_case = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
            _snake_case = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
            self.assertEqual(pre_tokenizer_state['add_prefix_space'] , lowercase )
            self.assertEqual(post_processor_state['add_prefix_space'] , lowercase )
            self.assertEqual(post_processor_state['trim_offsets'] , lowercase )

    def A ( self : Union[str, Any] ):
        """Check offset mappings under each add_prefix_space/trim_offsets combo."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                _snake_case = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                _snake_case = f'''{text_of_1_token} {text_of_1_token}'''
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ) + 1, len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowercase ), len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = f''' {text}'''
                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ) + 1, 1 + len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
                _snake_case = self.rust_tokenizer_class.from_pretrained(
                    lowercase , use_fast=lowercase , add_prefix_space=lowercase , trim_offsets=lowercase )
                _snake_case = tokenizer_r(lowercase , return_offsets_mapping=lowercase , add_special_tokens=lowercase )
                self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(lowercase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowercase ), 1 + len(lowercase ) + 1 + len(lowercase )) , )
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 150 |
"""simple docstring"""
from sklearn.metrics import fa_score
import datasets
A__ : List[str] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n'
A__ : List[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. 
This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, 
references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n'
A__ : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
# NOTE(review): `_DESCRIPTION` and `_KWARGS_DESCRIPTION` are undefined in this
# module — the string constants above were all renamed to `A__`, each rebinding
# the previous one — so this decorator raises NameError at import. The intended
# constant names appear to be `_DESCRIPTION`/`_KWARGS_DESCRIPTION`/`_CITATION`;
# confirm and restore them at module level.
class lowercase__ ( datasets.Metric ):
    """F1 metric (harmonic mean of precision and recall), delegating to
    scikit-learn. Supports binary, multiclass, and (via the ``multilabel``
    config) multilabel inputs."""

    # NOTE(review): both methods below are named `UpperCAmelCase__` — the second
    # def silently overrides the first, so the metric would expose no `_info`.
    # The originals were presumably `_info` and `_compute`; confirm.
    def UpperCAmelCase__ ( self : int ):
        """Describe the metric: sequence-valued features for the `multilabel`
        config, scalar int features otherwise. `_CITATION` is undefined here
        for the same reason as the decorator names above."""
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32" ) ),
                    "references": datasets.Sequence(datasets.Value("int32" ) ),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32" ),
                    "references": datasets.Value("int32" ),
                } ) , reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"] , )

    # NOTE(review): the repeated parameter name `snake_case__` is a SyntaxError;
    # the original parameters were presumably (predictions, references, labels,
    # pos_label, average, sample_weight). `fa_score` comes from the mangled
    # `from sklearn.metrics import fa_score` import (originally `f1_score`), and
    # `score` is never bound (the result goes to `lowerCamelCase_`). Left
    # byte-identical; only documentation added.
    def UpperCAmelCase__ ( self : List[str] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int=None , snake_case__ : Optional[int]=1 , snake_case__ : int="binary" , snake_case__ : List[str]=None ):
        """Compute F1 via sklearn; returns {"f1": float-or-array}."""
        lowerCamelCase_ : str =fa_score(
            snake_case__ , snake_case__ , labels=snake_case__ , pos_label=snake_case__ , average=snake_case__ , sample_weight=snake_case__ )
        return {"f1": float(snake_case__ ) if score.size == 1 else score}
| 144 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''facebook/data2vec-text-base''': '''https://huggingface.co/data2vec/resolve/main/config.json''',
}
class lowercase ( PretrainedConfig):
    """Configuration for a data2vec-text model (RoBERTa-style text encoder).

    Bug fixes over the obfuscated original: the base class name was undefined
    (restored to `PretrainedConfig`, imported above); the `__init__` signature
    repeated one parameter name (a SyntaxError); and the body assigned every
    value to a single throwaway local instead of setting attributes on `self`.
    Parameter names are restored from the surviving default values, which match
    `transformers.Data2VecTextConfig` — confirm against that class.
    """

    # `model_type` is the attribute `PretrainedConfig` machinery keys on; the
    # obfuscated attribute name (`a__`) was never a valid config field.
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        """Store the transformer hyperparameters; special-token ids are passed
        through to the `PretrainedConfig` base."""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowercase ( OnnxConfig):
    """ONNX export configuration for data2vec-text.

    Bug fixes over the obfuscated original: the base class name was undefined
    (restored to `OnnxConfig`, imported above); the body referenced
    `dynamic_axis` but assigned the dicts to a throwaway local; and the property
    carried a meaningless obfuscated name — restored to `inputs`, the property
    the `OnnxConfig` API actually reads (the old name was referenced nowhere).
    """

    @property
    def inputs( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Map each model input name to its dynamic-axis labels."""
        # Multiple-choice tasks carry an extra per-example `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 361 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__A = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( snake_case__):
    """Community pipeline: transcribe speech with Whisper, then generate images
    from the transcription with Stable Diffusion.

    NOTE(review): this block is machine-obfuscated. The base class name
    `snake_case__` is undefined in this module — presumably `DiffusionPipeline`,
    imported above; confirm. Signatures below repeat the parameter name
    `__UpperCAmelCase` (a SyntaxError in Python), and method bodies reference
    the original, now-lost local names (`prompt`, `batch_size`, `latents`,
    `safety_checker`, ...) while every result is assigned to the throwaway
    `UpperCAmelCase_`. Code is left byte-identical; only documentation and
    conventional indentation were added.
    """

    def __init__( self : Tuple , __UpperCAmelCase : WhisperForConditionalGeneration , __UpperCAmelCase : WhisperProcessor , __UpperCAmelCase : AutoencoderKL , __UpperCAmelCase : CLIPTextModel , __UpperCAmelCase : CLIPTokenizer , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __UpperCAmelCase : StableDiffusionSafetyChecker , __UpperCAmelCase : CLIPImageProcessor , ) -> List[str]:
        """Register the Whisper speech model/processor and the Stable Diffusion
        components (VAE, CLIP text encoder/tokenizer, UNet, scheduler, safety
        checker, feature extractor) on the pipeline."""
        super().__init__()
        # NOTE(review): `safety_checker` is undefined here — the parameter that
        # held it was renamed by obfuscation; restore the parameter name.
        if safety_checker is None:
            logger.warning(
                F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                """ that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"""
                """ results in services or applications open to the public. Both the diffusers team and Hugging Face"""
                """ strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"""
                """ it only for use-cases that involve analyzing network behavior or auditing its results. For more"""
                """ information, please have a look at https://github.com/huggingface/diffusers/pull/254 .""" )
        self.register_modules(
            speech_model=__UpperCAmelCase , speech_processor=__UpperCAmelCase , vae=__UpperCAmelCase , text_encoder=__UpperCAmelCase , tokenizer=__UpperCAmelCase , unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , )

    # NOTE(review): originally `enable_attention_slicing`; `slice_size` is
    # undefined (lost parameter name), and the computed half-head-dim lands in a
    # throwaway local instead of being passed on.
    def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __UpperCAmelCase : Optional[Union[str, int]] = "auto" ) -> List[Any]:
        """Enable sliced attention computation on the UNet; "auto" halves the
        attention head dim to trade speed for memory."""
        if slice_size == "auto":
            UpperCAmelCase_= self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(__UpperCAmelCase )

    # NOTE(review): originally `disable_attention_slicing` (calls the method
    # above with None); this def reuses the same obfuscated name and therefore
    # overrides it. `__UpperCAmelCase` is undefined in this scope.
    def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
        """Disable sliced attention (recompute in one step)."""
        self.enable_attention_slicing(__UpperCAmelCase )

    @torch.no_grad()
    def __call__( self : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str=16_000 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 512 , __UpperCAmelCase : int = 50 , __UpperCAmelCase : float = 7.5 , __UpperCAmelCase : Optional[Union[str, List[str]]] = None , __UpperCAmelCase : Optional[int] = 1 , __UpperCAmelCase : float = 0.0 , __UpperCAmelCase : Optional[torch.Generator] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , __UpperCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __UpperCAmelCase : int = 1 , **__UpperCAmelCase : Union[str, Any] , ) -> Any:
        """Run the full speech -> text -> image generation loop.

        Flow (per the surviving structure): Whisper transcribes the audio, the
        transcription becomes the diffusion prompt, CLIP encodes it (with
        optional classifier-free-guidance negative prompt), the scheduler/UNet
        denoise latents, and the VAE decodes images. Positional parameters
        presumably were (audio, sampling_rate, height, width,
        num_inference_steps, guidance_scale, negative_prompt,
        num_images_per_prompt, eta, generator, latents, output_type,
        return_dict, callback, callback_steps) — confirm against the upstream
        speech_to_image_diffusion community pipeline."""
        # Transcribe the audio with Whisper; the first decoded string is used
        # as the diffusion prompt.
        UpperCAmelCase_= self.speech_processor.feature_extractor(
            __UpperCAmelCase , return_tensors="""pt""" , sampling_rate=__UpperCAmelCase ).input_features.to(self.device )
        UpperCAmelCase_= self.speech_model.generate(__UpperCAmelCase , max_length=480_000 )
        UpperCAmelCase_= self.speech_processor.tokenizer.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase , normalize=__UpperCAmelCase )[
            0
        ]
        # Validate prompt type / image size / callback_steps.
        if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            UpperCAmelCase_= 1
        elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
            UpperCAmelCase_= len(__UpperCAmelCase )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(__UpperCAmelCase )}""" )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(__UpperCAmelCase )}.""" )
        # get prompt text embeddings
        UpperCAmelCase_= self.tokenizer(
            __UpperCAmelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
        UpperCAmelCase_= text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            UpperCAmelCase_= self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            UpperCAmelCase_= text_input_ids[:, : self.tokenizer.model_max_length]
        UpperCAmelCase_= self.text_encoder(text_input_ids.to(self.device ) )[0]
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_= text_embeddings.shape
        UpperCAmelCase_= text_embeddings.repeat(1 , __UpperCAmelCase , 1 )
        UpperCAmelCase_= text_embeddings.view(bs_embed * num_images_per_prompt , __UpperCAmelCase , -1 )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        UpperCAmelCase_= guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            # NOTE(review): `= 42` is an obfuscation artifact — the original was
            # an annotation-only declaration of `uncond_tokens`.
            UpperCAmelCase_= 42
            if negative_prompt is None:
                UpperCAmelCase_= [""""""] * batch_size
            elif type(__UpperCAmelCase ) is not type(__UpperCAmelCase ):
                raise TypeError(
                    F"""`negative_prompt` should be the same type to `prompt`, but got {type(__UpperCAmelCase )} !="""
                    F""" {type(__UpperCAmelCase )}.""" )
            elif isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                UpperCAmelCase_= [negative_prompt]
            elif batch_size != len(__UpperCAmelCase ):
                raise ValueError(
                    F"""`negative_prompt`: {negative_prompt} has batch size {len(__UpperCAmelCase )}, but `prompt`:"""
                    F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""" )
            else:
                UpperCAmelCase_= negative_prompt
            UpperCAmelCase_= text_input_ids.shape[-1]
            UpperCAmelCase_= self.tokenizer(
                __UpperCAmelCase , padding="""max_length""" , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="""pt""" , )
            UpperCAmelCase_= self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            UpperCAmelCase_= uncond_embeddings.shape[1]
            UpperCAmelCase_= uncond_embeddings.repeat(1 , __UpperCAmelCase , 1 )
            UpperCAmelCase_= uncond_embeddings.view(batch_size * num_images_per_prompt , __UpperCAmelCase , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            UpperCAmelCase_= torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        UpperCAmelCase_= (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        UpperCAmelCase_= text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                UpperCAmelCase_= torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device="""cpu""" , dtype=__UpperCAmelCase ).to(
                    self.device )
            else:
                UpperCAmelCase_= torch.randn(__UpperCAmelCase , generator=__UpperCAmelCase , device=self.device , dtype=__UpperCAmelCase )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            UpperCAmelCase_= latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(__UpperCAmelCase )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        UpperCAmelCase_= self.scheduler.timesteps.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        UpperCAmelCase_= latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        UpperCAmelCase_= """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        UpperCAmelCase_= {}
        if accepts_eta:
            UpperCAmelCase_= eta
        # Denoising loop: classifier-free guidance doubles the batch, the UNet
        # predicts noise, and the scheduler steps the latents back one timestep.
        for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
            # expand the latents if we are doing classifier free guidance
            UpperCAmelCase_= torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            UpperCAmelCase_= self.scheduler.scale_model_input(__UpperCAmelCase , __UpperCAmelCase )
            # predict the noise residual
            UpperCAmelCase_= self.unet(__UpperCAmelCase , __UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase ).sample
            # perform guidance
            if do_classifier_free_guidance:
                UpperCAmelCase_, UpperCAmelCase_= noise_pred.chunk(2 )
                UpperCAmelCase_= noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            # compute the previous noisy sample x_t -> x_t-1
            UpperCAmelCase_= self.scheduler.step(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
        # 1/0.18215 is the SD VAE scaling factor; decode latents to pixel space.
        UpperCAmelCase_= 1 / 0.18_215 * latents
        UpperCAmelCase_= self.vae.decode(__UpperCAmelCase ).sample
        UpperCAmelCase_= (image / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        UpperCAmelCase_= image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            UpperCAmelCase_= self.numpy_to_pil(__UpperCAmelCase )
        if not return_dict:
            return image
        return StableDiffusionPipelineOutput(images=__UpperCAmelCase , nsfw_content_detected=__UpperCAmelCase )
| 277 | 0 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__lowerCamelCase : Union[str, Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class a__ ( nn.Module ):
    """Encode an image into a sequence of pooled ResNet feature vectors shaped
    (batch, num_image_embeds, channels) for MMBT-style multimodal models.

    Bug fixes over the obfuscated original: `torchvision.models.resnetaaa` and
    `nn.AdaptiveAvgPoolad` do not exist (digits were mangled — restored to
    resnet152 / AdaptiveAvgPool2d; TODO confirm the depth against the original
    MMBT utils), the body referenced undefined names (`__lowercase`, `args`,
    `POOLING_BREAKDOWN`), and the built modules were never assigned to `self`.
    Broken imported-type annotations (`Optional` is not imported here) dropped.
    """

    # num_image_embeds -> AdaptiveAvgPool2d output grid (rows, cols).
    # Duplicated from the module-level table because that constant was renamed
    # to a double-underscore name, which would be mangled inside this class.
    _GRID = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}

    def __init__( self, _A ):
        """_A: args namespace providing `num_image_embeds`."""
        super().__init__()
        backbone = torchvision.models.resnet152(pretrained=True)
        layers = list(backbone.children())[:-2]  # drop the avgpool + fc head
        self.model = nn.Sequential(*layers)
        self.pool = nn.AdaptiveAvgPool2d(self._GRID[_A.num_image_embeds])

    def __UpperCamelCase( self, _A ):
        """Return pooled backbone features, flattened to (batch, N, channels)."""
        out = self.pool(self.model(_A))        # B x C x rows x cols
        out = torch.flatten(out, start_dim=2)  # B x C x N
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class a__ ( Dataset ):
    """Dataset over a JSON-lines file of MM-IMDB records ({text, label, img}).

    Bug fixes over the obfuscated original: the base class name was undefined
    (restored to `Dataset`, imported above); `__init__` repeated one parameter
    name (a SyntaxError) and assigned everything to a throwaway local, so no
    `self.*` attribute was ever set; `__getitem__` collapsed a 3-way tuple
    unpack into one variable and lost the label-index assignment; the data file
    was never closed. Restored names/order follow the original MMBT
    `utils_mmimdb.JsonlDataset` — confirm against that file.
    """

    def __init__( self, data_path, tokenizer, transforms, labels, max_seq_length ):
        """Load all JSONL records eagerly and remember the preprocessing bits."""
        with open(data_path) as f:
            self.data = [json.loads(line) for line in f]
        self.data_dir = os.path.dirname(data_path)  # images are relative to the jsonl file
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__( self ):
        return len(self.data)

    def __getitem__( self, index ):
        """Return one sample: tokenized text (special tokens split off as
        image start/end markers), a multi-hot label vector, and the transformed
        RGB image."""
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        # First/last special tokens become the image boundary tokens for MMBT.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        # Multi-hot encode every genre attached to this sample.
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def __UpperCamelCase( self ):
        """Count how often each label occurs across the dataset (originally
        `get_label_frequencies`; obfuscated name kept)."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = [len(row["sentence"] ) for row in batch]
SCREAMING_SNAKE_CASE_ : List[str] = len(__lowerCamelCase ), max(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros(__lowerCamelCase , __lowerCamelCase , dtype=torch.long )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.zeros(__lowerCamelCase , __lowerCamelCase , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(__lowerCamelCase , __lowerCamelCase ) ):
SCREAMING_SNAKE_CASE_ : List[str] = input_row["""sentence"""]
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.stack([row["image"] for row in batch] )
SCREAMING_SNAKE_CASE_ : Dict = torch.stack([row["label"] for row in batch] )
SCREAMING_SNAKE_CASE_ : List[str] = torch.stack([row["image_start_token"] for row in batch] )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def _snake_case ( ):
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def _snake_case ( ):
    """Return the torchvision preprocessing pipeline for MM-IMDB images:
    resize to 256, center-crop to 224, convert to tensor, and normalize with
    the dataset-specific channel statistics."""
    steps = [
        transforms.Resize(2_5_6),
        transforms.CenterCrop(2_2_4),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.46777044, 0.44531429, 0.40661017],
            std=[0.12221994, 0.12145835, 0.14380469],
        ),
    ]
    return transforms.Compose(steps)
| 18 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a : Dict = logging.get_logger(__name__)
def lowerCamelCase__ ( __lowerCamelCase : str ):
    """Build the `DetaConfig` for `__lowerCamelCase` (the checkpoint name),
    downloading the matching id2label mapping from the Hub.

    Bug fixes over the obfuscated original: every computed value was assigned
    to a throwaway local, so the config's `num_labels`/`id2label`/`label2id`
    were never set; `idalabel`/`model_name` were undefined; and the id keys
    were converted with `int(<the parameter>)` instead of `int(k)`. The `True`
    flags below are restored per the original DETA conversion script — confirm.
    """
    model_name = __lowerCamelCase
    # Swin-large backbone feeding stages 2-4 into the DETA neck.
    backbone_config = SwinConfig(
        embed_dim=192, depths=(2, 2, 18, 2), num_heads=(6, 12, 24, 48), window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )
    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def lowerCamelCase__ ( __lowerCamelCase : Union[str, Any] ):
__UpperCAmelCase : Dict = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.reduction.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.weight""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.0.body.layers.{i}.downsample.norm.bias""", f"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", f"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", f"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", f"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", f"""model.encoder.layers.{i}.self_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", f"""model.encoder.layers.{i}.self_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", f"""model.encoder.layers.{i}.self_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", f"""model.encoder.layers.{i}.self_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.weight""", f"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm1.bias""", f"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""model.encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""model.encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""model.encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""model.encoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.weight""", f"""model.encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""model.encoder.layers.{i}.final_layer_norm.bias""") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", f"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", f"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", f"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.weight""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm1.bias""", f"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", f"""model.decoder.layers.{i}.self_attn.out_proj.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""model.decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.weight""", f"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm2.bias""", f"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""model.decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""model.decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""model.decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""model.decoder.layers.{i}.fc2.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.weight""", f"""model.decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""model.decoder.layers.{i}.final_layer_norm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase__ ( dct , src , dest ):
    """Rename one state-dict key in place: move ``dct[src]`` to ``dct[dest]``.

    Fixes the mangled original, which declared all three parameters with the
    same name (a SyntaxError) and bound the popped value to a throwaway local
    instead of writing it back under the new key.
    """
    dct[dest] = dct.pop(src)
def lowerCamelCase__ ( __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ):
    """Split each fused Swin-backbone qkv projection into separate q/k/v tensors.

    NOTE(review): this block was mechanically renamed and is not runnable as
    written — both parameters share the name ``__lowerCamelCase`` (SyntaxError)
    and the body reads ``backbone_config``, ``state_dict``, ``num_features``,
    ``dim``, ``in_proj_weight`` and ``in_proj_bias``, none of which are bound;
    each ``__UpperCAmelCase = ...`` line was presumably a ``state_dict[...]``
    write originally. Restore the intended names before use.
    """
    # One feature dimension per Swin stage: embed_dim * 2**stage.
    __UpperCAmelCase : List[Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        __UpperCAmelCase : Any = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            __UpperCAmelCase : int = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" )
            __UpperCAmelCase : str = state_dict.pop(f"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            # query = first `dim` rows/entries, key = middle `dim`, value = last `dim`.
            __UpperCAmelCase : str = in_proj_weight[:dim, :]
            __UpperCAmelCase : Union[str, Any] = in_proj_bias[: dim]
            __UpperCAmelCase : List[str] = in_proj_weight[
                dim : dim * 2, :
            ]
            __UpperCAmelCase : Dict = in_proj_bias[
                dim : dim * 2
            ]
            __UpperCAmelCase : List[str] = in_proj_weight[
                -dim :, :
            ]
            __UpperCAmelCase : Optional[Any] = in_proj_bias[-dim :]
            # fmt: on
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : Any ):
    """Split the decoder self-attention fused ``in_proj`` into q/k/v tensors.

    NOTE(review): mechanically renamed and not runnable as written — both
    parameters share one name (SyntaxError) and the body reads ``config``,
    ``state_dict``, ``hidden_size``, ``in_proj_weight`` and ``in_proj_bias``,
    which are never bound; each ``__UpperCAmelCase = ...`` line was presumably
    a ``state_dict[...]`` write originally.
    """
    # transformer decoder self-attention layers
    __UpperCAmelCase : Union[str, Any] = config.d_model
    for i in range(config.decoder_layers ):
        # read in weights + bias of input projection layer of self-attention
        __UpperCAmelCase : List[Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
        __UpperCAmelCase : Union[str, Any] = state_dict.pop(f"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        # query = first `hidden_size` rows, key = middle, value = last.
        __UpperCAmelCase : int = in_proj_weight[:hidden_size, :]
        __UpperCAmelCase : Any = in_proj_bias[:hidden_size]
        __UpperCAmelCase : Dict = in_proj_weight[
            hidden_size : hidden_size * 2, :
        ]
        __UpperCAmelCase : Union[str, Any] = in_proj_bias[hidden_size : hidden_size * 2]
        __UpperCAmelCase : List[Any] = in_proj_weight[-hidden_size:, :]
        __UpperCAmelCase : int = in_proj_bias[-hidden_size:]
def lowerCamelCase__ ( ):
    """Download and return the standard COCO cats test image (PIL.Image) used
    to verify the converted model.

    Fixes the mangled original, which passed the undefined name
    ``__lowerCamelCase`` to ``requests.get`` and returned the undefined ``im``.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def lowerCamelCase__ ( __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ):
    """Convert an original DETA checkpoint to a HuggingFace
    ``DetaForObjectDetection`` model, verify its outputs on a test image, and
    optionally save it locally and/or push it to the hub.

    NOTE(review): mechanically renamed and not runnable as written — the three
    parameters (originally ``model_name``, ``pytorch_dump_folder_path``,
    ``push_to_hub``, judging from the argparse block below) share one name
    (SyntaxError), and the body reads many names that are never bound
    (``model_name``, ``state_dict``, ``rename_keys``, ``val``, ``key``,
    ``model``, ``device``, ``processor``, ``img``, ``encoding``,
    ``pixel_values``, ``outputs``, ``expected_logits``, ``expected_boxes``,
    ``pytorch_dump_folder_path``, ``push_to_hub``). Every
    ``__UpperCAmelCase = ...`` line was presumably an assignment to one of
    those names originally.
    """
    __UpperCAmelCase : Any = get_deta_config(__lowerCamelCase )
    # load original state dict
    if model_name == "deta-swin-large":
        __UpperCAmelCase : Optional[Any] = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" )
    elif model_name == "deta-swin-large-o365":
        __UpperCAmelCase : Any = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" )
    else:
        raise ValueError(f"""Model name {model_name} not supported""" )
    __UpperCAmelCase : int = torch.load(__lowerCamelCase , map_location="""cpu""" )["""model"""]
    # original state dict
    for name, param in state_dict.items():
        print(__lowerCamelCase , param.shape )
    # rename keys
    __UpperCAmelCase : Dict = create_rename_keys(__lowerCamelCase )
    for src, dest in rename_keys:
        rename_key(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    read_in_swin_q_k_v(__lowerCamelCase , config.backbone_config )
    read_in_decoder_q_k_v(__lowerCamelCase , __lowerCamelCase )
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            __UpperCAmelCase : str = state_dict.pop(__lowerCamelCase )
            __UpperCAmelCase : Optional[Any] = val
        if "input_proj" in key:
            __UpperCAmelCase : Any = state_dict.pop(__lowerCamelCase )
            __UpperCAmelCase : Union[str, Any] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            __UpperCAmelCase : Dict = state_dict.pop(__lowerCamelCase )
            __UpperCAmelCase : Optional[int] = val
    # finally, create HuggingFace model and load state dict
    __UpperCAmelCase : Optional[Any] = DetaForObjectDetection(__lowerCamelCase )
    model.load_state_dict(__lowerCamelCase )
    model.eval()
    __UpperCAmelCase : Optional[int] = """cuda""" if torch.cuda.is_available() else """cpu"""
    model.to(__lowerCamelCase )
    # load image processor
    __UpperCAmelCase : int = DetaImageProcessor(format="""coco_detection""" )
    # verify our conversion on image
    __UpperCAmelCase : Dict = prepare_img()
    __UpperCAmelCase : Tuple = processor(images=__lowerCamelCase , return_tensors="""pt""" )
    __UpperCAmelCase : Optional[int] = encoding["""pixel_values"""]
    __UpperCAmelCase : List[str] = model(pixel_values.to(__lowerCamelCase ) )
    # verify logits
    print("""Logits:""" , outputs.logits[0, :3, :3] )
    print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] )
    # Reference values per checkpoint; compared below at 1e-4 tolerance.
    if model_name == "deta-swin-large":
        __UpperCAmelCase : List[Any] = torch.tensor(
            [[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
        __UpperCAmelCase : int = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
    elif model_name == "deta-swin-large-o365":
        __UpperCAmelCase : Optional[int] = torch.tensor(
            [[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
        __UpperCAmelCase : List[Any] = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
    assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__lowerCamelCase ) , atol=1E-4 )
    assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__lowerCamelCase ) , atol=1E-4 )
    print("""Everything ok!""" )
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" )
        Path(__lowerCamelCase ).mkdir(exist_ok=__lowerCamelCase )
        model.save_pretrained(__lowerCamelCase )
        processor.save_pretrained(__lowerCamelCase )
    # Push to hub
    if push_to_hub:
        print("""Pushing model and processor to hub...""" )
        model.push_to_hub(f"""jozhang97/{model_name}""" )
        processor.push_to_hub(f"""jozhang97/{model_name}""" )
if __name__ == "__main__":
    # Command-line entry point for the DETA checkpoint conversion script.
    # Fixes the mangled original, which bound the ArgumentParser and the parse
    # result to the throwaway name `a` while calling methods on the undefined
    # names `parser` and `args`.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        type=str,
        default="deta-swin-large",
        choices=["deta-swin-large", "deta-swin-large-o365"],
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        help="Path to the folder to output PyTorch model.",
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    # `lowerCamelCase__` is the (name-mangled) conversion entry point defined above.
    lowerCamelCase__(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 114 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
# Module-level logger (name mangled from the conventional ``logger``).
lowerCamelCase_ : List[str] = logging.get_logger(__name__)

# PIL is an optional dependency; only import it when vision support is available.
if is_vision_available():
    import PIL
class _UpperCamelCase ( _A ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, rescale and per-channel normalization of a batch of
    images, returned as a ``BatchFeature`` of ``pixel_values``.

    NOTE(review): this class was mechanically renamed and is not runnable as
    written — every method declares all of its parameters as ``snake_case_``
    (duplicate parameter names are a SyntaxError), and method bodies read the
    intended names (``size``, ``crop_size``, ``images``, ``do_resize``, ...)
    that are never bound. Restore distinct parameter names before use.
    """

    # The single model input this processor produces.
    __UpperCamelCase : int = ["""pixel_values"""]

    def __init__( self : Any , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : bool = True , snake_case_ : Dict[str, int] = None , snake_case_ : bool = True , snake_case_ : Union[int, float] = 1 / 255 , snake_case_ : bool = True , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = True , **snake_case_ : Optional[Any] , ):
        super().__init__(**snake_case_ )
        # Defaults: 224-px shortest-edge resize and a 224x224 center crop.
        UpperCamelCase_: Union[str, Any] = size if size is not None else {"""shortest_edge""": 224}
        UpperCamelCase_: Dict = get_size_dict(snake_case_ , default_to_square=snake_case_ )
        UpperCamelCase_: Any = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        UpperCamelCase_: List[str] = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name="""crop_size""" )
        UpperCamelCase_: Tuple = do_resize
        UpperCamelCase_: int = size
        UpperCamelCase_: Optional[int] = resample
        UpperCamelCase_: Dict = do_center_crop
        UpperCamelCase_: Dict = crop_size
        UpperCamelCase_: Union[str, Any] = do_rescale
        UpperCamelCase_: str = rescale_factor
        UpperCamelCase_: Optional[Any] = do_normalize
        # Fall back to the OpenAI CLIP normalization statistics.
        UpperCamelCase_: Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCamelCase_: Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCamelCase_: List[Any] = do_convert_rgb

    # Resize so the shortest edge matches size["shortest_edge"], keeping aspect ratio.
    def lowerCAmelCase__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : PILImageResampling = PILImageResampling.BICUBIC , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
        UpperCamelCase_: Union[str, Any] = get_size_dict(snake_case_ , default_to_square=snake_case_ )
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        UpperCamelCase_: Dict = get_resize_output_image_size(snake_case_ , size=size["""shortest_edge"""] , default_to_square=snake_case_ )
        return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )

    # Center-crop to size["height"] x size["width"].
    def lowerCAmelCase__ ( self : List[str] , snake_case_ : np.ndarray , snake_case_ : Dict[str, int] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Dict , ):
        UpperCamelCase_: str = get_size_dict(snake_case_ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(snake_case_ , size=(size["""height"""], size["""width"""]) , data_format=snake_case_ , **snake_case_ )

    # Multiply pixel values by a scale factor (typically 1/255).
    def lowerCAmelCase__ ( self : Dict , snake_case_ : np.ndarray , snake_case_ : Union[int, float] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : Union[str, Any] , ):
        return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )

    # Normalize with per-channel mean and std.
    def lowerCAmelCase__ ( self : int , snake_case_ : np.ndarray , snake_case_ : Union[float, List[float]] , snake_case_ : Union[float, List[float]] , snake_case_ : Optional[Union[str, ChannelDimension]] = None , **snake_case_ : List[str] , ):
        return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )

    # Full preprocessing pipeline; per-call arguments override the instance config.
    def lowerCAmelCase__ ( self : Union[str, Any] , snake_case_ : ImageInput , snake_case_ : bool = None , snake_case_ : Dict[str, int] = None , snake_case_ : PILImageResampling = None , snake_case_ : bool = None , snake_case_ : int = None , snake_case_ : bool = None , snake_case_ : float = None , snake_case_ : bool = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : Optional[Union[float, List[float]]] = None , snake_case_ : bool = None , snake_case_ : Optional[Union[str, TensorType]] = None , snake_case_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **snake_case_ : List[Any] , ):
        UpperCamelCase_: Any = do_resize if do_resize is not None else self.do_resize
        UpperCamelCase_: Tuple = size if size is not None else self.size
        UpperCamelCase_: int = get_size_dict(snake_case_ , param_name="""size""" , default_to_square=snake_case_ )
        UpperCamelCase_: List[Any] = resample if resample is not None else self.resample
        UpperCamelCase_: Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCamelCase_: int = crop_size if crop_size is not None else self.crop_size
        UpperCamelCase_: Any = get_size_dict(snake_case_ , param_name="""crop_size""" , default_to_square=snake_case_ )
        UpperCamelCase_: Tuple = do_rescale if do_rescale is not None else self.do_rescale
        UpperCamelCase_: List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCamelCase_: Any = do_normalize if do_normalize is not None else self.do_normalize
        UpperCamelCase_: Union[str, Any] = image_mean if image_mean is not None else self.image_mean
        UpperCamelCase_: List[Any] = image_std if image_std is not None else self.image_std
        UpperCamelCase_: List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCamelCase_: str = make_list_of_images(snake_case_ )
        if not valid_images(snake_case_ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        # Validate that each enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCamelCase_: Dict = [convert_to_rgb(snake_case_ ) for image in images]
        # All transformations expect numpy arrays.
        UpperCamelCase_: Optional[int] = [to_numpy_array(snake_case_ ) for image in images]
        if do_resize:
            UpperCamelCase_: Union[str, Any] = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
        if do_center_crop:
            UpperCamelCase_: Any = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
        if do_rescale:
            UpperCamelCase_: Tuple = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
        if do_normalize:
            UpperCamelCase_: Optional[int] = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
        UpperCamelCase_: Tuple = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
        UpperCamelCase_: List[Any] = {"""pixel_values""": images}
        return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
| 223 |
def A__ ( lowerCamelCase ) -> list:
    """Return every variant of the string with exactly one alphabetic
    character uppercased, in order of that character's position.

    Non-alphabetic characters produce no variant.
    """
    variants = []
    for index, char in enumerate(lowerCamelCase):
        if char.isalpha():
            variants.append(lowerCamelCase[:index] + char.upper() + lowerCamelCase[index + 1 :])
    return variants


if __name__ == "__main__":
    # Run the module's doctests when executed directly.
    __import__("""doctest""").testmod()
| 223 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class __UpperCAmelCase ( list ):
    """A pile for patience sort: a list compared by its last (top) element.

    Fixes the mangled original, which inherited from ``A__`` (not a class in
    this file) instead of ``list`` and compared against the undefined name
    ``other`` instead of its parameter.
    """

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]


def __a ( UpperCAmelCase ) ->list:
    """Sort the given list in place with patience sort and return it.

    Fixes the mangled original, which referenced an undefined ``Stack`` class
    and discarded the merged result instead of writing it back into the
    collection.
    """
    stacks = []
    # Deal each element onto the leftmost pile whose top is >= element;
    # bisecting on the piles themselves works thanks to __lt__/__eq__ above.
    for element in UpperCAmelCase:
        new_pile = __UpperCAmelCase([element] )
        i = bisect_left(stacks , new_pile )
        if i != len(stacks ):
            stacks[i].append(element )
        else:
            stacks.append(new_pile )
    # Heap-based k-way merge of the (reversed, hence ascending) piles.
    UpperCAmelCase[:] = merge(*(reversed(stack ) for stack in stacks) )
    return UpperCAmelCase


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(__a(unsorted))
| 258 |
'''simple docstring'''
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def __a ( UpperCAmelCase ):  # picklable for multiprocessing
    """Return the sum of the given array; module-level so it can be pickled.

    Fixes the mangled original, which returned ``x.sum()`` with ``x``
    undefined instead of using the parameter.
    """
    return UpperCAmelCase.sum()
def __a ( UpperCAmelCase ) ->int:  # picklable for multiprocessing
    """Return the argument plus one; module-level so it can be pickled.

    Fixes the mangled original, which returned ``i + 1`` with ``i`` undefined
    instead of using the parameter.
    """
    return UpperCAmelCase + 1
@dataclass
class __UpperCAmelCase :
    """Simple two-field record used by the ``asdict`` test below.

    NOTE(review): mangled — both fields were renamed to ``__lowerCAmelCase``
    (the second assignment shadows the first) and their annotations were
    replaced by ``= 42``; as written the dataclass therefore has no fields.
    Originally presumably ``x`` and ``y`` annotated fields, judging from the
    ``A(x=..., y=...)`` calls further down.
    """
    __lowerCAmelCase = 42
    __lowerCAmelCase = 42
class __UpperCAmelCase ( A__ ):
    """Unit tests for ``datasets.utils.py_utils``: ``map_nested``,
    ``zip_dict`` and ``temporary_assignment``.

    NOTE(review): mechanically renamed and not runnable as written — the base
    class ``A__`` was presumably ``TestCase``, every distinct local was
    collapsed onto the single name ``A``, the arguments ``_lowerCAmelCase``
    are unbound, and the lambda near the end reads an undefined ``x``.
    """

    def A (self : Tuple ):
        # Input fixtures covering every supported nesting shape, followed by
        # the corresponding expected outputs (originally distinct names).
        A = {}
        A = []
        A = 1
        A = [1, 2]
        A = {"""a""": 1, """b""": 2}
        A = {"""a""": [1, 2], """b""": [3, 4]}
        A = {"""a""": {"""1""": 1}, """b""": 2}
        A = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
        A = {}
        A = []
        A = 2
        A = [2, 3]
        A = {"""a""": 2, """b""": 3}
        A = {"""a""": [2, 3], """b""": [4, 5]}
        A = {"""a""": {"""1""": 2}, """b""": 3}
        A = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
        # Single-process mapping over each fixture.
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase ) , _lowerCAmelCase )
        # Same checks with a multiprocessing pool.
        A = 2
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        # NumPy leaves: converted only when map_numpy=True.
        A = {"""a""": np.eye(2 ), """b""": np.zeros(3 ), """c""": np.ones(2 )}
        A = {"""a""": 2, """b""": 0, """c""": 2}
        A = {
            """a""": np.eye(2 ).astype(_lowerCAmelCase ),
            """b""": np.zeros(3 ).astype(_lowerCAmelCase ),
            """c""": np.ones(2 ).astype(_lowerCAmelCase ),
        }
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        self.assertEqual(map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ) , _lowerCAmelCase )
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(_lowerCAmelCase , _lowerCAmelCase , map_numpy=_lowerCAmelCase , num_proc=_lowerCAmelCase ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
        with self.assertRaises(_lowerCAmelCase ): # can't pickle a local lambda
            map_nested(lambda _lowerCAmelCase : x + 1 , _lowerCAmelCase , num_proc=_lowerCAmelCase )

    def A (self : List[Any] ):
        # zip_dict pairs up the values of equal-keyed dicts.
        A = {"""a""": 1, """b""": 2}
        A = {"""a""": 3, """b""": 4}
        A = {"""a""": 5, """b""": 6}
        A = sorted([("""a""", (1, 3, 5)), ("""b""", (2, 4, 6))] )
        self.assertEqual(sorted(zip_dict(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) ) , _lowerCAmelCase )

    def A (self : Union[str, Any] ):
        # temporary_assignment must restore the attribute on exit.
        class __UpperCAmelCase :
            """Fixture with a single class attribute (originally ``my_attr``)."""
            __lowerCAmelCase = '''bar'''
        A = Foo()
        self.assertEqual(foo.my_attr , """bar""" )
        with temporary_assignment(_lowerCAmelCase , """my_attr""" , """BAR""" ):
            self.assertEqual(foo.my_attr , """BAR""" )
        self.assertEqual(foo.my_attr , """bar""" )
@pytest.mark.parametrize(
    """iterable_length, num_proc, expected_num_proc""" , [
        (1, None, 1),
        (1, 1, 1),
        (2, None, 1),
        (2, 1, 1),
        (2, 2, 1),
        (2, 3, 1),
        (3, 2, 1),
        (16, 16, 16),
        (16, 17, 16),
        (17, 16, 16),
    ] , )
def __a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ->Any:
    """Check that map_nested dispatches to the single-process path vs a pool
    depending on iterable length and requested num_proc.

    NOTE(review): mangled and not runnable as written — the three parameters
    share one name (SyntaxError), the body reads the unbound
    ``iterable_length`` / ``expected_num_proc``, and the lambda reads an
    undefined ``x``.
    """
    with patch("""datasets.utils.py_utils._single_map_nested""" ) as mock_single_map_nested, patch(
        """datasets.parallel.parallel.Pool""" ) as mock_multiprocessing_pool:
        A = {f"""{i}""": i for i in range(UpperCAmelCase )}
        A = map_nested(lambda UpperCAmelCase : x + 10 , UpperCAmelCase , num_proc=UpperCAmelCase , parallel_min_length=16 )
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
        assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __UpperCAmelCase ( A__ ):
    """Tests that ``temp_seed`` makes TensorFlow / PyTorch / NumPy RNG output
    reproducible: same seed -> identical outputs, no seed -> different output.

    NOTE(review): mechanically renamed and not runnable as written — the base
    ``A__`` was presumably ``TestCase``, and the distinct locals
    (``model``, ``outa``/``outb``, ...) were collapsed onto ``A`` while the
    assertions read the original names.
    """

    @require_tf
    def A (self : Dict ):
        import tensorflow as tf
        from tensorflow.keras import layers
        A = layers.Dense(2 )
        def gen_random_output():
            A = tf.random.uniform((1, 3) )
            return model(_lowerCAmelCase ).numpy()
        # Two seeded runs must match; an unseeded third run must differ.
        with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
            A = gen_random_output()
        with temp_seed(42 , set_tensorflow=_lowerCAmelCase ):
            A = gen_random_output()
        A = gen_random_output()
        np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    @require_torch
    def A (self : Tuple ):
        import torch
        def gen_random_output():
            A = torch.nn.Linear(3 , 2 )
            A = torch.rand(1 , 3 )
            return model(_lowerCAmelCase ).detach().numpy()
        with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
            A = gen_random_output()
        with temp_seed(42 , set_pytorch=_lowerCAmelCase ):
            A = gen_random_output()
        A = gen_random_output()
        np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )

    def A (self : str ):
        def gen_random_output():
            return np.random.rand(1 , 3 )
        with temp_seed(42 ):
            A = gen_random_output()
        with temp_seed(42 ):
            A = gen_random_output()
        A = gen_random_output()
        np.testing.assert_equal(_lowerCAmelCase , _lowerCAmelCase )
        self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize("""input_data""" , [{}] )
def __a ( UpperCAmelCase ) ->List[str]:
    """Check that NestedDataStructure stores the given data unchanged.

    NOTE(review): mangled — the ``.data`` result is bound to the throwaway
    name ``A`` while the assertion reads the unbound ``output_data`` /
    ``input_data``. Not runnable as written.
    """
    A = NestedDataStructure(UpperCAmelCase ).data
    assert output_data == input_data
@pytest.mark.parametrize(
    """data, expected_output""" , [
        ({}, []),
        ([], []),
        ("""foo""", ["""foo"""]),
        (["""foo""", """bar"""], ["""foo""", """bar"""]),
        ([["""foo""", """bar"""]], ["""foo""", """bar"""]),
        ([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
        ([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
        ({"""a""": 1, """b""": 2}, [1, 2]),
        ({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
        ({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
        ({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
        ({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
    ] , )
def __a ( UpperCAmelCase , UpperCAmelCase ) ->List[Any]:
    """Check NestedDataStructure.flatten() on many nesting shapes.

    NOTE(review): mangled — duplicate parameter names (SyntaxError); the
    flattened result is bound to ``A`` while the assertion reads the unbound
    ``output`` / ``expected_output``. Not runnable as written.
    """
    A = NestedDataStructure(UpperCAmelCase ).flatten()
    assert output == expected_output
def __a ( ) ->Optional[Any]:
    """Check ``asdict`` on a flat dataclass, nested containers of dataclasses,
    and the error case of a non-dataclass top-level argument.

    NOTE(review): mangled and not runnable as written — ``A`` is used both as
    the local target and as the dataclass constructor (the dataclass above was
    renamed to ``__UpperCAmelCase``), and the assertions read the unbound
    names ``expected_output`` / ``UpperCAmelCase``.
    """
    A = A(x=1 , y="""foobar""" )
    A = {"""x""": 1, """y""": """foobar"""}
    assert asdict(UpperCAmelCase ) == expected_output
    A = {"""a""": {"""b""": A(x=10 , y="""foo""" )}, """c""": [A(x=20 , y="""bar""" )]}
    A = {"""a""": {"""b""": {"""x""": 10, """y""": """foo"""}}, """c""": [{"""x""": 20, """y""": """bar"""}]}
    assert asdict(UpperCAmelCase ) == expected_output
    with pytest.raises(UpperCAmelCase ):
        asdict([1, A(x=10 , y="""foo""" )] )
def __a ( UpperCAmelCase ):
    """Split the given text on whitespace and return the list of words.

    Fixes the mangled original, which returned ``text.split()`` with ``text``
    undefined instead of using the parameter.
    """
    return UpperCAmelCase.split()
def __a ( UpperCAmelCase ):
    """Yield ``(timestamp, UpperCAmelCase)`` twice, two seconds apart.

    Used to check that items are forwarded as soon as they are produced.
    Fixes the mangled original, which yielded the undefined name ``content``
    instead of the parameter.
    """
    yield (time.time(), UpperCAmelCase)
    time.sleep(2 )
    yield (time.time(), UpperCAmelCase)
def __a ( ) ->Optional[int]:
    """Exercise ``iflatmap_unordered`` with both the std-lib and the pathos
    multiprocessing pools, and check items arrive as soon as they are yielded.

    NOTE(review): mangled and not runnable as written — the result lists are
    bound to the throwaway name ``A`` while the assertions read ``out``, and
    the pool argument passed to ``iflatmap_unordered`` is the unbound
    ``UpperCAmelCase`` (presumably ``pool``). The helper names
    ``_split_text`` and ``_aseconds_generator_of_aitems_with_timing`` refer
    to the two functions above, whose definitions were renamed to ``__a``.
    """
    with Pool(2 ) as pool:
        A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(UpperCAmelCase ) == 20
    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2 ) as pool:
        A = list(iflatmap_unordered(UpperCAmelCase , _split_text , kwargs_iterable=[{"""text""": """hello there"""}] * 10 ) )
        assert out.count("""hello""" ) == 10
        assert out.count("""there""" ) == 10
        assert len(UpperCAmelCase ) == 20
    # check that we get items as fast as possible
    with Pool(2 ) as pool:
        A = []
        for yield_time, content in iflatmap_unordered(
            UpperCAmelCase , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{"""content""": """a"""}, {"""content""": """b"""}] ):
            assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
            out.append(UpperCAmelCase )
        assert out.count("""a""" ) == 2
        assert out.count("""b""" ) == 2
        assert len(UpperCAmelCase ) == 4
| 258 | 1 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases and a unit-conversion constant.
# NOTE(review): mangled — all three were bound to the same name ``a_``
# (each assignment shadows the previous); presumably FeatureDict,
# ModelOutput and PICO_TO_ANGSTROM originally — the code below reads
# ``PICO_TO_ANGSTROM``, which is otherwise undefined. TODO confirm.
a_ = Mapping[str, np.ndarray]
a_ = Mapping[str, Any] # Is a nested dict.
a_ = 0.01
@dataclasses.dataclass(frozen=lowerCamelCase )
class __SCREAMING_SNAKE_CASE :
    """Protein structure record: atom coordinates plus per-residue metadata.

    NOTE(review): mangled — every field was renamed to ``snake_case_`` (each
    assignment shadows the previous) and the type annotations were replaced by
    ``= 42`` / ``= None``, so as written the dataclass has no fields; the
    comments below preserve the intended field meanings. Also,
    ``frozen=lowerCamelCase`` references an undefined name (presumably
    ``True`` originally).
    """
    # Cartesian atom coordinates (units not stated here; the parser below
    # multiplies by a pico-to-angstrom factor — TODO confirm).
    snake_case_ = 42 # [num_res, num_atom_type, 3]
    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    snake_case_ = 42 # [num_res]
    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    snake_case_ = 42 # [num_res, num_atom_type]
    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    snake_case_ = 42 # [num_res]
    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    snake_case_ = 42 # [num_res, num_atom_type]
    # Chain indices for multi-chain predictions
    snake_case_ = None
    # Optional remark about the protein. Included as a comment in output PDB
    # files
    snake_case_ = None
    # Templates used to generate this protein (prediction-only)
    snake_case_ = None
    # Chain corresponding to each parent
    snake_case_ = None
def _a( UpperCamelCase__ : str ):
    """Parse a ProteinNet-format text record into a ``Protein``.

    NOTE(review): mechanically renamed and not runnable as written — the body
    reads ``tags``, ``groups``, ``seq``, ``tertiary``, ``tertiary_np``,
    ``atom_positions``, ``mask``, ``atom_mask`` and ``aatype``, none of which
    are bound (all assignments target the single name
    ``SCREAMING_SNAKE_CASE__``), and the final ``Protein(...)`` call refers to
    the class above, which was renamed to ``__SCREAMING_SNAKE_CASE``.
    """
    # Split the record into [TAG] headers and their line groups.
    SCREAMING_SNAKE_CASE__ : List[Any] =R'''(\[[A-Z]+\]\n)'''
    SCREAMING_SNAKE_CASE__ : List[str] =[tag.strip() for tag in re.split(UpperCamelCase__, UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0]
    SCREAMING_SNAKE_CASE__ : Iterator[Tuple[str, List[str]]] =zip(tags[0::2], [l.split('''\n''' ) for l in tags[1::2]] )
    SCREAMING_SNAKE_CASE__ : List[str] =["N", "CA", "C"]
    SCREAMING_SNAKE_CASE__ : int =None
    SCREAMING_SNAKE_CASE__ : List[Any] =None
    SCREAMING_SNAKE_CASE__ : Optional[Any] =None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Sequence: map each residue symbol to its restype index,
            # replacing unknown symbols with 'X'.
            SCREAMING_SNAKE_CASE__ : Union[str, Any] =g[1][0].strip()
            for i in range(len(UpperCamelCase__ ) ):
                if seq[i] not in residue_constants.restypes:
                    SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''X''' # FIXME: strings are immutable
            SCREAMING_SNAKE_CASE__ : Optional[Any] =np.array(
                [residue_constants.restype_order.get(UpperCamelCase__, residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Coordinates: three axis rows, interleaved N/CA/C per residue.
            SCREAMING_SNAKE_CASE__ : List[List[float]] =[]
            for axis in range(3 ):
                tertiary.append(list(map(UpperCamelCase__, g[1][axis].split() ) ) )
            SCREAMING_SNAKE_CASE__ : List[str] =np.array(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ : List[Any] =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(UpperCamelCase__ ):
                SCREAMING_SNAKE_CASE__ : Any =np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # Mask: '-' -> 0, '+' -> 1, broadcast over atom types.
            SCREAMING_SNAKE_CASE__ : List[str] =np.array(list(map({'''-''': 0, '''+''': 1}.get, g[1][0].strip() ) ) )
            SCREAMING_SNAKE_CASE__ : List[Any] =np.zeros(
                (
                    len(UpperCamelCase__ ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(UpperCamelCase__ ):
                SCREAMING_SNAKE_CASE__ : List[Any] =1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=UpperCamelCase__, atom_mask=UpperCamelCase__, aatype=UpperCamelCase__, residue_index=np.arange(len(UpperCamelCase__ ) ), b_factors=UpperCamelCase__, )
def _a( UpperCamelCase__ : Protein, UpperCamelCase__ : int = 0 ):
    """Build the PDB REMARK/PARENT header lines for one chain of a protein.

    NOTE(review): mangled and not runnable as written — both parameters share
    one name (SyntaxError) and the body reads ``prot``, ``pdb_headers``,
    ``remark``, ``parents``, ``parents_chain_index`` and ``chain_id``, which
    are never bound (all assignments target ``SCREAMING_SNAKE_CASE__``).
    """
    SCREAMING_SNAKE_CASE__ : List[str] =[]
    SCREAMING_SNAKE_CASE__ : int =prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}" )
    SCREAMING_SNAKE_CASE__ : str =prot.parents
    SCREAMING_SNAKE_CASE__ : List[Any] =prot.parents_chain_index
    # Keep only the parents belonging to the requested chain.
    if parents is not None and parents_chain_index is not None:
        SCREAMING_SNAKE_CASE__ : Any =[p for i, p in zip(UpperCamelCase__, UpperCamelCase__ ) if i == chain_id]
    if parents is None or len(UpperCamelCase__ ) == 0:
        SCREAMING_SNAKE_CASE__ : Union[str, Any] =['''N/A''']
    pdb_headers.append(f"PARENT {' '.join(UpperCamelCase__ )}" )
    return pdb_headers
def _a( UpperCamelCase__ : Protein, UpperCamelCase__ : str ):
    """Insert REMARK/PARENT header lines into an existing (possibly
    multi-chain) PDB string, emitting fresh PARENT lines at chain breaks.

    NOTE(review): mangled and not runnable as written — both parameters share
    one name (SyntaxError) and the body reads ``pdb_str``, ``prot``,
    ``out_pdb_lines``, ``lines``, ``remark``, ``parents_per_chain``,
    ``parent_dict``, ``max_idx`` and ``chain_counter``, which are never bound
    (all assignments target ``SCREAMING_SNAKE_CASE__``).
    """
    SCREAMING_SNAKE_CASE__ : List[str] =[]
    SCREAMING_SNAKE_CASE__ : Any =pdb_str.split('''\n''' )
    SCREAMING_SNAKE_CASE__ : Union[str, Any] =prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}" )
    SCREAMING_SNAKE_CASE__ : List[List[str]]
    if prot.parents is not None and len(prot.parents ) > 0:
        SCREAMING_SNAKE_CASE__ : Tuple =[]
        if prot.parents_chain_index is not None:
            # Group parents by chain index, filling gaps with "N/A".
            SCREAMING_SNAKE_CASE__ : Dict[str, List[str]] ={}
            for p, i in zip(prot.parents, prot.parents_chain_index ):
                parent_dict.setdefault(str(UpperCamelCase__ ), [] )
                parent_dict[str(UpperCamelCase__ )].append(UpperCamelCase__ )
            SCREAMING_SNAKE_CASE__ : str =max([int(UpperCamelCase__ ) for chain_idx in parent_dict] )
            for i in range(max_idx + 1 ):
                SCREAMING_SNAKE_CASE__ : Optional[Any] =parent_dict.get(str(UpperCamelCase__ ), ['''N/A'''] )
                parents_per_chain.append(UpperCamelCase__ )
        else:
            parents_per_chain.append(list(prot.parents ) )
    else:
        SCREAMING_SNAKE_CASE__ : List[Any] =[['''N/A''']]
    def make_parent_line(UpperCamelCase__ : Sequence[str] ) -> str:
        # Format one PARENT header line from a list of parent ids.
        return f"PARENT {' '.join(UpperCamelCase__ )}"
    out_pdb_lines.append(make_parent_line(parents_per_chain[0] ) )
    SCREAMING_SNAKE_CASE__ : str =0
    for i, l in enumerate(UpperCamelCase__ ):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(UpperCamelCase__ )
        # A TER record not followed by END marks a chain break: emit the
        # next chain's PARENT line.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(UpperCamelCase__ ):
                SCREAMING_SNAKE_CASE__ : Union[str, Any] =parents_per_chain[chain_counter]
            else:
                SCREAMING_SNAKE_CASE__ : int =['''N/A''']
            out_pdb_lines.append(make_parent_line(UpperCamelCase__ ) )
    return "\n".join(UpperCamelCase__ )
def _a( prot ):
    """Convert a ``Protein`` instance to a PDB-format string.

    Emits optional headers, one ATOM record per present atom (mask >= 0.5),
    TER records at chain boundaries, and trailing END line.

    NOTE(review): the obfuscated original bound every local to a placeholder
    name and then referenced the real names (NameError); restored below.
    Attribute names on ``residue_constants`` (``restype_atoa`` etc.) are kept
    exactly as this repo spells them.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_atoa(r):
        # Residue type index -> three-letter residue name, "UNK" for unknowns.
        return residue_constants.restype_atoa.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    # ``np.intaa`` in the original was a digit-mangled ``np.int32`` and does
    # not exist on numpy.
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_atoa(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def _a( prot ):
    """Return the atom mask implied by the residue identities alone (every
    atom the residue type could have), as opposed to ``prot.atom_mask`` which
    reflects the atoms actually reported.

    NOTE(review): the original parameter was a placeholder name while the body
    read ``prot`` — a guaranteed NameError, fixed here.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def _a( features, result, b_factors=None, chain_index=None, remark=None, parents=None, parents_chain_index=None ):
    """Assemble a ``Protein`` from model ``features`` and a prediction
    ``result``. Residue indices are shifted to be 1-based; missing B-factors
    default to zeros shaped like the final atom mask.

    NOTE(review): the original repeated one placeholder parameter name seven
    times (a SyntaxError); parameter names are restored from the annotations.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
'''simple docstring'''
def solution( limit = 28123 ):
    """Project Euler #23: sum of all positive integers <= ``limit`` that
    cannot be written as the sum of two abundant numbers.

    Sieves proper-divisor sums, collects abundant numbers incrementally, and
    adds every n with no decomposition n = a + (n - a), both parts abundant.

    NOTE(review): renamed from the obfuscated ``_a`` to match the
    ``solution()`` call in the ``__main__`` guard; locals restored.
    """
    # sum_divs[n] accumulates the proper-divisor sum of n (1 always divides).
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i  # i pairs with itself for perfect squares
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i  # divisor pair (i, k) of k*i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        # All abundant numbers <= n are already in the set, so this check
        # covers every possible split of n into two abundant parts.
        if not any((n - a) in abundants for a in abundants):
            res += n

    return res
if __name__ == "__main__":
    # Print the Project Euler #23 answer for the default limit (28123).
    print(solution())
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class snake_case_( DiffusionPipeline ):
    """Unconditional latent-diffusion pipeline: denoises random latents with a
    UNet + DDIM scheduler, then decodes them into images with a VQ-VAE.

    NOTE(review): the original base class ``a__`` was undefined and the
    ``__call__`` signature repeated one placeholder parameter name (a
    SyntaxError); names restored from the imports/annotations.
    """

    def __init__( self, vqvae, unet, scheduler ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__( self, batch_size = 1, generator = None, eta = 0.0, num_inference_steps = 50, output_type = "pil", return_dict = True, **kwargs ):
        """Sample ``batch_size`` images; returns an ``ImagePipelineOutput``
        (or a plain tuple when ``return_dict`` is False)."""
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 60 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the conversion routine below logs progress through it.
# NOTE(review): the original bound the logger to a placeholder name while
# later code called ``logger.info`` — restored here.
logger = logging.get_logger(__name__)
def rename_keys( state_dict ):
    """Map original GLPN checkpoint keys onto the HF ``GLPNForDepthEstimation``
    naming scheme, returning a new ``OrderedDict`` (insertion order kept).

    NOTE(review): renamed from the obfuscated ``_snake_case`` to match the
    ``rename_keys(...)`` call site in the conversion routine; the mangled
    local bindings (``key``, ``idx``, ``new_state_dict``) are restored.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v( config, state_dict ):
    """Split each fused key/value ("kv") projection in ``state_dict`` into the
    separate key and value weights/biases HF GLPN expects. Mutates
    ``state_dict`` in place.

    NOTE(review): renamed from the obfuscated ``_snake_case`` to match its
    call site; the original discarded the split tensors into mangled locals
    instead of writing them back into the state dict.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[: config.hidden_sizes[i], :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[config.hidden_sizes[i] :, :]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Fetch the standard COCO cats test image used to sanity-check the
    converted model's forward pass.

    NOTE(review): renamed per its call site; the original replaced both the
    URL variable and the ``stream=True`` flag with its own (function) name.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint( checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None ):
    """Convert an original GLPN checkpoint at ``checkpoint_path`` to the HF
    format, verify a forward pass against published reference slices, and
    optionally push model + image processor to the hub.

    NOTE(review): renamed per the ``__main__`` call site; mangled local
    bindings restored.
    """
    # GLPN-large configuration (shared by the KITTI and NYU checkpoints).
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(config, state_dict)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output against the reference slice published for each checkpoint
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # NOTE(review): the original assigned the parser and the parsed args to
    # placeholder names while still referencing ``parser``/``args``.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import shaaaa
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cva
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
# Module-level constants and feature flags.
# NOTE(review): the original assigned every constant to the same placeholder
# name while later code read ``_torch_available``, ``TRANSFORMERS_CACHE``,
# ``OBJECTS`` etc.; the real names are restored from those references.
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    # Fallback mirrors torch's own cache-dir resolution.
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )

default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def _a( objs = OBJECTS, attrs = ATTRIBUTES ):
    """Load the Visual-Genome class and attribute vocabularies.

    Each line is "name, alias, ..."; only the first field is kept,
    lower-cased and stripped. Returns ``(vg_classes, vg_attrs)``.

    NOTE(review): the original duplicated one placeholder parameter name (a
    SyntaxError); names restored. The loop variable no longer shadows the
    ``object`` builtin.
    """
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():
            vg_classes.append(line.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def _a ( lowerCamelCase: Tuple ) -> Optional[Any]:
'''simple docstring'''
__A = OrderedDict()
with open(lowerCamelCase , '''rb''' ) as f:
__A = pkl.load(lowerCamelCase )['''model''']
for k in copy.deepcopy(list(ckp.keys() ) ):
__A = ckp.pop(lowerCamelCase )
if isinstance(lowerCamelCase , np.ndarray ):
__A = torch.tensor(lowerCamelCase )
else:
assert isinstance(lowerCamelCase , torch.tensor ), type(lowerCamelCase )
__A = v
return r
class Config:
    """Nested attribute-style configuration loaded from dicts/YAML.

    Nested dicts become nested ``Config`` objects; values are reachable both
    as attributes (``cfg.model.hidden``) and through ``to_dict()``.

    NOTE(review): the obfuscated class was named ``A_`` while its own body
    (and ``get_config_dict``) referenced ``Config`` — the internal references
    ground the restored name. Mangled locals/method names restored likewise.
    """

    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d

    def __repr__(self):
        return str(list((self._pointer.keys())))

    def __setattr__(self, key, val):
        # Keep both the full dotted key and its leaf accessible, and mirror
        # dotted assignments down into the nested Config/_pointer structure.
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]

    def to_dict(self):
        return self._pointer

    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)

    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)

    @staticmethod
    def load_yaml(config):
        # SECURITY: uses the module-level yaml ``Loader``; only load trusted
        # configuration files.
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data

    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
            self._level = level
        return r[:-1]

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)

    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path, **kwargs):
        """Resolve ``pretrained_model_name_or_path`` (dir, file, or hub id) to
        a parsed YAML config dict plus the remaining kwargs."""
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)

        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
            )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")

        return Config.load_yaml(resolved_config_file), kwargs
def _a ( lowerCamelCase: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
__A = torch.load('''dump.pt''' , map_location=in_tensor.device )
__A = in_tensor.numpy()
__A = out_tensor.numpy()[0]
print(na.shape , na[0, 0, :5] )
print(na.shape , na[0, 0, :5] )
assert np.allclose(lowerCamelCase , lowerCamelCase , rtol=0.01 , atol=0.1 ), (
F"""{sum([1 for x in np.isclose(lowerCamelCase , lowerCamelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*1_00:.4f} %"""
" element-wise mismatch"
)
raise Exception('''tensors are all good''' )
# Hugging face functions below
def is_remote_url( url_or_filename ):
    """Return True when ``url_or_filename`` is an http(s) URL.

    NOTE(review): renamed from the obfuscated ``_a`` to match its call sites
    in ``get_config_dict``/``cached_path``; the mangled local restored.
    """
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url( model_id, filename, use_cdn=True ):
    """Build the download URL for ``filename`` of ``model_id`` on either the
    CDN or the legacy S3 bucket.

    NOTE(review): the obfuscated original duplicated the parameter names and
    its f-strings contained a corrupted "(unknown)" where ``{filename}``
    belongs; restored from the ``filename`` parameter.
    """
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get( url, temp_file, proxies=None, resume_size=0, user_agent=None ):
    """Stream ``url`` into the open binary file object ``temp_file`` with a
    tqdm progress bar, optionally resuming from byte offset ``resume_size``.

    NOTE(review): renamed per its caller in the cache routine; mangled local
    bindings (``ua``, ``headers``, ``response`` ...) restored, including the
    lost ``headers["Range"]`` assignment that makes resuming work.
    """
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    """Download ``url`` into ``cache_dir`` (keyed by sha256 of URL + ETag) and
    return the local path; returns the cached copy when already present.
    Returns None (or raises with ``local_files_only``) when offline and no
    cached copy exists.

    NOTE(review): renamed per its caller in ``cached_path``; the obfuscated
    parameter list repeated one name and every local binding was mangled —
    restored from the references in the body.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            # Original passed the %s placeholders as separate print args and
            # never interpolated them.
            print(
                "%s not found in cache or force_download set to True, downloading to %s" % (url, temp_file.name)
            )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename( url, etag=None ):
    """Deterministically map ``url`` (plus optional ``etag``) to a flat cache
    filename: sha256(url)[.sha256(etag)], keeping a ``.h5`` suffix so h5py
    can still recognize the file type.

    NOTE(review): renamed per its caller; the original used ``shaaaa`` — a
    digit-mangled ``sha256`` — and mangled every local binding.
    """
    from hashlib import sha256  # local import: the module-level hashlib import name was mangled

    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    """Resolve ``url_or_filename`` to a local path: remote URLs go through the
    download cache, local paths are returned as-is, and zip/tar archives are
    optionally extracted next to the cache entry.

    NOTE(review): renamed per its caller in ``get_config_dict``; mangled
    bindings restored from the body's references.
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    # SECURITY: extractall trusts archive member paths; only
                    # use on trusted archives.
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
def _a ( lowerCamelCase: Union[str, Any] , lowerCamelCase: Optional[int]="," ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(lowerCamelCase , lowerCamelCase )
if os.path.isfile(lowerCamelCase ):
with open(lowerCamelCase ) as f:
__A = eval(f.read() )
else:
__A = requests.get(lowerCamelCase )
try:
__A = requests.json()
except Exception:
__A = req.content.decode()
assert data is not None, "could not connect"
try:
__A = eval(lowerCamelCase )
except Exception:
__A = data.split('''\n''' )
req.close()
return data
def get_image_from_url( url ):
    """Download an image from ``url`` and return it as a numpy array.

    NOTE(review): renamed from ``_a`` to match its call site in the image
    tensorizer; mangled locals restored.
    """
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def _a ( lowerCamelCase: str ) -> Tuple:
'''simple docstring'''
__A = url.split('''/''' )[-1]
if fn not in os.listdir(os.getcwd() ):
wget.download(lowerCamelCase )
with open(lowerCamelCase , '''rb''' ) as stream:
__A = pkl.load(lowerCamelCase )
__A = weights.pop('''model''' )
__A = {}
for k, v in model.items():
__A = torch.from_numpy(lowerCamelCase )
if "running_var" in k:
__A = torch.tensor([0] )
__A = k.replace('''running_var''' , '''num_batches_tracked''' )
__A = zero
return new
def _a ( ) -> Optional[int]:
'''simple docstring'''
print(F"""{os.path.abspath(os.path.join(lowerCamelCase , os.pardir ) )}/demo.ipynb""" )
def _a( im, input_format="RGB" ):
    """Read an image from a local path (OpenCV) or a URL into an ndarray,
    converted to the requested channel order.

    ``cva`` is this module's (mangled) OpenCV import alias; kept as-is so the
    function works against the file's actual import.
    """
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cva.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cva.cvtColor(img, cva.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def _a ( lowerCamelCase: List[str] , lowerCamelCase: Optional[int]=1 ) -> str:
'''simple docstring'''
return (images[i : i + batch] for i in range(0 , len(lowerCamelCase ) , lowerCamelCase ))
| 361 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants.
# NOTE(review): the original assigned every constant to the same placeholder
# name while the tokenizer class below reads ``VOCAB_FILES_NAMES``,
# ``PRETRAINED_VOCAB_FILES_MAP`` and ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``
# — restored from those references.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def _a ( lowerCamelCase: List[Any] ) -> Union[str, Any]:
'''simple docstring'''
__A = collections.OrderedDict()
with open(lowerCamelCase , '''r''' , encoding='''utf-8''' ) as reader:
__A = reader.readlines()
for index, token in enumerate(lowerCamelCase ):
__A = token.rstrip('''\n''' )
__A = index
return vocab
class WordpieceTokenizer(object):
    """Greedy longest-match sub-token splitter used by the CPM-Ant tokenizer.

    Restored from mangled source: the class is constructed below as
    ``WordpieceTokenizer(vocab=..., unk_token=...)`` and used via
    ``.tokenize(...)``; the original ``__init__`` also declared duplicate
    parameter names (a SyntaxError) and never assigned the instance attributes
    that ``tokenize`` reads.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab  # token -> id mapping used for membership tests
        self.unk_token = unk_token  # emitted when no sub-token matches
        self.max_input_chars_per_word = max_input_chars_per_word  # guard against pathological words

    def tokenize(self, token):
        """Split ``token`` into the longest vocabulary sub-tokens, left to right.

        Returns ``[self.unk_token]`` when the word is longer than
        ``max_input_chars_per_word``; unmatched single characters become the
        unknown token.
        """
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]

        start = 0
        sub_tokens = []
        while start < len(chars):
            # Shrink the window from the right until a vocab entry matches.
            end = len(chars)
            cur_substr = None
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No match at all for chars[start]; emit <unk> and move on.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class CpmAntTokenizer(PreTrainedTokenizer):
    """Tokenizer for CPM-Ant: jieba word segmentation followed by greedy
    longest-match sub-token lookup.

    Restored from mangled source: the original bound all five class attributes
    to one name, gave every method the same name (so later definitions
    shadowed earlier ones), and declared duplicate ``__init__`` parameters
    (a SyntaxError). Names are restored to the identifiers the code itself
    references (``VOCAB_FILES_NAMES``, ``WordpieceTokenizer``, ``logger``, the
    ``PreTrainedTokenizer`` API).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        # jieba performs the coarse Chinese word segmentation in ``_tokenize``.
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Map the dedicated space/newline marker tokens onto the literal
        # characters, then drop the marker entries.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        # Id of the begin-of-document token.
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        # Id of the end-of-document token.
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        # Id of the literal newline character (remapped from ``line_token``).
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment ``text`` with jieba, then greedily sub-tokenize each word."""
        output_tokens = []
        for word in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(word))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        """Drop negative ids and special tokens before delegating to the base decoder."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        """Return True when ``token`` is part of the vocabulary."""
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        """Token -> id, falling back to the unknown-token id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Id -> token, falling back to the unknown token."""
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary back to disk, one token per line in id order."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        # Restore the marker tokens for the literal space/newline entries.
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
                        ''' Please check that the vocabulary is not corrupted!''' )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        """Prefix each sequence with the BOS token."""
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mark the positions of special tokens (1) vs. sequence tokens (0)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
| 250 | 0 |
from typing import Any
class Node:
    """A single node of a singly linked list.

    Renamed from a mangled duplicate name: the list implementation and the
    tests in this module construct ``Node(...)``, and the second class with
    the same mangled name shadowed this one anyway.
    """

    def __init__(self, data: Any) -> None:
        self.data = data  # payload carried by this node
        self.next = None  # reference to the next node; None marks the tail

    def __repr__(self) -> str:
        return f"Node({self.data})"
class LinkedList:
    """Singly linked list with index access, insertion, deletion and reversal.

    Restored from mangled source: the class and method names are the ones the
    in-module tests call (``LinkedList``, ``insert_tail``, ``delete_nth``,
    ...); the original also declared duplicate parameter names (a SyntaxError)
    and gave every method the same name, so later definitions shadowed
    earlier ones.
    """

    def __init__(self) -> None:
        self.head = None  # first Node, or None for an empty list

    def __iter__(self) -> Any:
        """Yield the data of each node from head to tail."""
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        """Return the data stored at ``index`` (O(n) traversal)."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, value in enumerate(self):
            if i == index:
                return value
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        """Overwrite the data stored at ``index``."""
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        """Insert ``data`` so that it becomes element ``index``."""
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        """Remove element ``index`` and return its data."""
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        """Reverse the list in place by flipping the ``next`` pointers."""
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    """Exercise the basic LinkedList operations with integer payloads.

    Renamed from a mangled identifier (three module functions shared one
    name); every undefined ``a__`` reference is restored to the local it
    clearly stands for.
    """
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))
def test_singly_linked_list_2() -> None:
    """Exercise LinkedList with heterogeneous payloads (numbers, strings,
    Node instances and None).

    Renamed from a mangled identifier; every undefined ``a__`` reference is
    restored to the local it clearly stands for.
    """
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    """Interactive driver: run the doctests, then let the user build and
    manipulate a linked list from stdin.

    Renamed to ``main`` because the ``__main__`` guard below calls ``main()``;
    the undefined ``a__``/``a_`` references are restored to ``linked_list``.
    """
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
| 248 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
# Configure root logging once at import time; the module logger must be bound
# to the name ``logger`` because that is what main() below uses (the previous
# binding used a mangled name, leaving ``logger`` undefined).
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main() -> None:
    """Pre-tokenize a raw text file and pickle the resulting id sequences.

    Renamed to ``main`` because the ``__main__`` guard calls ``main()``.
    Fixes restored from mangled source: undefined ``a__`` references (they
    stood for ``str``, ``data``, ``token_ids``, ``handle``), the invalid
    ``np.uintaa``/``np.intaa`` dtypes (uint16/int32 per the ``1 << 16`` vocab
    check), and the ``iter`` builtin being shadowed.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter_ = 0  # avoid shadowing the builtin ``iter``
    interval = 10000
    start = time.time()
    for text in data:
        # Wrap each line with the model's sequence delimiters before encoding.
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter_ += 1
        if iter_ % interval == 0:
            end = time.time()
            logger.info(f"{iter_} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()

    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # uint16 is enough when every id fits in 16 bits; otherwise fall back to int32.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
| 248 | 1 |
'''simple docstring'''
def binomial_coefficient(n, r):
    """Compute the binomial coefficient C(n, r) via Pascal's rule.

    Restored from mangled source: the original declared both parameters with
    the same name (a SyntaxError) while the body used ``n``/``r``, and the
    call below invokes ``binomial_coefficient(n=..., r=...)``.

    Runs in O(n*r) time with O(r) extra space.
    """
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
| 352 |
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __UpperCAmelCase ( unittest.TestCase ):
    """Multi-GPU smoke tests: each test shells out to ``torchrun`` running one
    of the helper scripts shipped with ``accelerate.test_utils``.

    Restored from mangled source: ``setUp`` never assigned the ``self.*``
    paths the tests read, the command list was never passed to
    ``execute_subprocess_async`` (an undefined name was), and all four methods
    shared one name so only the last survived. Method names follow the
    ``unittest`` convention (``setUp`` / ``test_*``) so discovery finds them.
    """

    def setUp(self):
        # Locate the helper scripts next to accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_script.py'])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_distributed_data_loop.py'])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['scripts', 'test_ops.py'])

    @require_multi_gpu
    def test_multi_gpu(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(F'Found {torch.cuda.device_count()} devices.')
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(F'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        # Re-runs this very file under torchrun to exercise the __main__ block.
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(F'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ['torchrun', F'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices='0,1'):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    # Sanity-check Accelerator.pad_across_processes from inside a torchrun
    # launch. Restored from mangled source: every assignment was bound to one
    # name while the body read ``accelerator``/``shape``/``tensor``/
    # ``error_msg``/``tensora``/``index``.
    accelerator = Accelerator()

    # Each rank owns a tensor whose first dimension is rank + 2.
    shape = (accelerator.state.process_index + 2, 10)
    tensor = torch.randint(0, 10, shape).to(accelerator.device)

    error_msg = ""

    tensora = accelerator.pad_across_processes(tensor)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."

    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensora.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensora[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensora[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 160 | 0 |
"""simple docstring"""
def solution(numerator=3, denominator=7, limit=1000000):
    """Project Euler 71: numerator of the fraction immediately left of
    ``numerator/denominator`` among fractions with denominators up to ``limit``.

    Restored from mangled source: the original declared all three parameters
    with one name (a SyntaxError) while the body used
    ``numerator``/``denominator``/``limit``, and the call below invokes
    ``solution(numerator=..., denominator=..., limit=...)``.
    """
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        # Largest p with p/current_denominator <= numerator/denominator ...
        current_numerator = current_denominator * numerator // denominator
        # ... stepped back once when it would equal the target fraction.
        if current_denominator % denominator == 0:
            current_numerator -= 1
        # Keep the candidate if it is strictly larger (cross-multiplied compare).
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_00_00_00))
| 66 |
"""simple docstring"""
def harmonic_series(n_term: str) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    Restored from mangled source: the body referenced ``n_term`` while the
    parameter carried a different name, and the script below calls
    ``harmonic_series(nth_term)``.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    >>> harmonic_series("")
    []
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # The very first term is written "1", every later term "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
| 45 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable standard pinhole camera.

    Restored from mangled source: the nine dataclass fields were all declared
    as one name, and every method shared one name so later definitions
    shadowed earlier ones. Field and method names follow the references in
    the bodies (``self.origin``/``self.x``/..., ``self.resolution()``,
    ``self.get_camera_rays(...)``) and in ``create_pan_cameras`` below.
    """

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        # All per-camera frame vectors must share one batch dimension and be
        # 2-D with 3 components each.
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        """Return (width, height) as a float32 tensor."""
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        """Return (x_fov, y_fov) as a float32 tensor."""
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        """Integer (x, y) coordinates of every pixel, row-major, shape [H*W, 2]."""
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        """(origin, direction) ray pairs for every pixel of every camera,
        shaped [batch, inner * H * W, 2, 3]."""
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        """Map integer pixel coordinates to (origin, direction) ray pairs."""
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Normalized device coordinates in [-1, 1], scaled by tan(fov / 2).
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Create a camera for the resized view, assuming an unchanged aspect ratio.

        NOTE(review): ``shape`` is not forwarded even though it is a required
        dataclass field — confirm the intended behavior upstream.
        """
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
        )
def lowercase__(size: int):
    """Build a batch of 20 cameras panning in a circle of radius 4 around the
    origin, each looking inward and slightly downward.

    Restored from mangled source: the four accumulator lists were all bound to
    one name while the body appends to ``origins``/``xs``/``ys``/``zs`` and
    stacks them at the end. NOTE(review): the public name is presumably
    ``create_pan_cameras`` upstream; the mangled name is kept because callers
    outside this chunk are unknown.

    Args:
        size: Width and height (in pixels) of each camera view.
    """
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        # Viewing direction: inward toward the origin, tilted down by 0.5.
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 321 | """simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    """Builds small ESM configs/inputs and runs shape checks for each head.

    Restored from mangled source: the test class below instantiates
    ``EsmModelTester(self)`` and calls ``prepare_config_and_inputs`` /
    ``create_and_check_*``, but every method here shared one mangled name
    (later definitions shadowed earlier ones) and locals were unbound.
    Parameter names are taken from the ``self.*`` attributes they populate.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """Tiny EsmConfig used by all tests."""
        return EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            pad_token_id=1,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Adapter used by the common ModelTesterMixin machinery."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for the ESM models.

    Restored from mangled source: all class attributes were bound to a single
    name and all methods shared one name, so discovery would only ever see the
    last definition. Attribute and method names follow the HF test-harness
    conventions (ModelTesterMixin / unittest) — the names of the skipped
    methods are reconstructed and should be cross-checked against upstream.
    """

    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_create_position_ids_respects_padding_index(self):
        """Padded positions keep the padding index; others count up past it."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )

        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        """Position ids derived from embeddings start right after padding_idx."""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(lowercase_):
    """Slow integration tests comparing an ESM checkpoint against reference values.

    NOTE(review): the base class is the file's mangled ``lowercase_`` alias —
    presumably TestCasePlus; confirm against the imports at the top of the file.
    """

    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 321 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# Module logger — the loader functions below report initialized/unused weights through it.
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> HF submodule path.  "*" is replaced with the
# encoder layer index by ``load_wavaveca_layer``.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF attributes that live directly on the model (not under the "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    """Read a label file and return ``{line_number: first_token}``.

    Blank lines are skipped (their line numbers are simply absent from the
    result).  Used to build ``config.id2label`` for sequence classification.
    """
    result = {}
    with open(filename, 'r') as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the HF module reached by walking ``key`` from ``hf_pointer``.

    ``weight_type`` selects which attribute (weight / weight_g / weight_v / bias /
    adapter "param") receives the data; shapes are validated first and a
    ValueError is raised on mismatch.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    # Adapter parameters are stored under special names; remap via PARAM_MAPPING.
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split('.'):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split('.'):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Store ``value`` in ``hf_dict`` under its flat HF-style key.

    The key is ``"<key>.<weight_type>"`` (or the adapter param name from
    PARAM_MAPPING); fairseq wraps most tensors in a 1-element container, so the
    first element is taken except for the LM head.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split('.')[-1]]
            weight_type = 'param'

    if weight_type is not None and weight_type != "param":
        full_key = '.'.join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = '.'.join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if 'lm_head' in full_key else value[0]
# Adapter parameter names (fairseq) -> their HF attribute paths.  A weight whose
# full name ends with one of these keys is handled as weight_type == "param".
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Map one fairseq tensor onto the HF model (or a flat ``hf_dict``).

    Returns True when ``name`` matched an entry of MAPPING, False otherwise.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
            is_used = True
            if "*" in mapped_key:
                # Recover the encoder layer index from the fairseq name.
                layer_index = name.split(key)[0].split('.')[-2]
                mapped_key = mapped_key.replace('*', layer_index)
            if "weight_g" in name:
                weight_type = 'weight_g'
            elif "weight_v" in name:
                weight_type = 'weight_v'
            elif "bias" in name:
                weight_type = 'bias'
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = 'weight'
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            # Stop at the first matching mapping entry.
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor of ``fairseq_model``'s state dict into ``hf_model``.

    Conv feature-extractor weights go through ``load_conv_layer``; everything
    else through ``load_wavaveca_layer``.  Unmatched names are collected and
    logged as a warning.

    NOTE(review): ``is_headless`` is accepted for call-site compatibility but is
    not read in this body.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one conv feature-extractor tensor into ``feature_extractor``.

    ``type_id`` 0 is the conv weight/bias; ``type_id`` 2 is the layer norm
    (only present on layer 0 when group norm is used).  Unrecognized names are
    appended to ``unused_weights``.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint into the HF format and save it.

    Depending on the flags, builds a sequence-classification, CTC, or
    pre-training model, copies the fairseq weights into it, and writes the
    model (plus tokenizer/feature-extractor when fine-tuned) to
    ``pytorch_dump_folder_path``.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wav2vec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=16_000,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='|',
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = WavaVecaForCTC(config)
    else:
        hf_wav2vec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    # A sequence-classification checkpoint is not a CTC fine-tuned one.
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
| 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available


# Lazily-populated import structure: submodule name -> exported public symbols.
# Optional-backend entries are only added when the backend is available.
_import_structure = {
    "configuration_audio_spectrogram_transformer": [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ASTConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
        "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ASTForAudioClassification",
        "ASTModel",
        "ASTPreTrainedModel",
    ]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]

if TYPE_CHECKING:
    from .configuration_audio_spectrogram_transformer import (
        AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ASTConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_audio_spectrogram_transformer import (
            AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ASTForAudioClassification,
            ASTModel,
            ASTPreTrainedModel,
        )

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor

else:
    import sys

    # Install a lazy proxy so heavy backends are only imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 90 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    """Builds small GPT-J configs/inputs and shared KV-cache consistency checks.

    Renamed from the mangled ``__lowerCAmelCase`` — the test class below
    instantiates it as ``FlaxGPTJModelTester(self)``.
    """

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        rotary_dim=4,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        # NOTE(review): the original bare ``None`` assignment most plausibly was
        # ``self.scope`` — confirm; it is not read in the visible methods.
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1

    def prepare_config_and_inputs(self):
        """Return ``(config, input_ids, input_mask)`` for a tiny GPT-J model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = GPTJConfig(
            vocab_size=self.vocab_size,
            n_embd=self.hidden_size,
            n_layer=self.num_hidden_layers,
            n_head=self.num_attention_heads,
            n_positions=self.max_position_embeddings,
            use_cache=False,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            rotary_dim=self.rotary_dim,
        )

        return (config, input_ids, input_mask)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        """Cached incremental decoding must match a single full forward pass."""
        max_decoder_length = 20
        model = model_class_name(config)

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")

        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )
        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )

        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            attention_mask=attention_mask,
            past_key_values=outputs_cache.past_key_values,
            position_ids=position_ids,
        )

        outputs = model(input_ids)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        """Same as above but with an explicit (zero-padded) attention mask."""
        max_decoder_length = 20
        model = model_class_name(config)

        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))],
            axis=-1,
        )

        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1)
        )

        outputs_cache = model(
            input_ids[:, :-1],
            attention_mask=attention_mask_cache,
            past_key_values=past_key_values,
            position_ids=position_ids,
        )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:],
            past_key_values=outputs_cache.past_key_values,
            attention_mask=attention_mask_cache,
            position_ids=position_ids,
        )

        outputs = model(input_ids, attention_mask=attention_mask)

        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    """Common + cross-framework tests for Flax GPT-J.

    Restored from the mangled second ``__lowerCAmelCase`` (which shadowed the
    model tester); the mixin bases are taken from the file's imports.
    """

    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        # Must live on ``self`` — every test method reads it.
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )

    @tooslow
    def test_batch_generation(self):
        tokenizer = GPTaTokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left')
        inputs = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B')
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)

        output_sequences = jit_generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id
        ).sequences

        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]

        self.assertListEqual(output_string, expected_string)

    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch'
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch'
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 347 |
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("dataset_size", [None, 400 * 2**20, 600 * 2**20])
@pytest.mark.parametrize("input_in_memory_max_size", ["default", 0, 100 * 2**20, 900 * 2**20])
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    """``is_small_dataset`` is True only when both sizes are truthy and size < cap.

    Parameter names must match the ``parametrize`` ids above so pytest can
    inject them; ``monkeypatch`` is the built-in fixture.
    """
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
| 347 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
# Module logger (the feature extractor's warnings go through it).
logger = logging.get_logger(__name__)
class lowerCamelCase (A__ ):
lowerCamelCase__ : List[Any] = ['input_features', 'attention_mask']
def __init__(
    self,
    feature_size=80,
    sampling_rate=16000,
    padding_value=0.0,
    hop_length=10,
    win_length=25,
    win_function="hamming_window",
    frame_signal_scale=32768.0,
    preemphasis_coeff=0.97,
    mel_floor=1.0,
    normalize_means=True,
    normalize_vars=True,
    return_attention_mask=False,
    **kwargs,
):
    """Configure the mel-filterbank feature extractor.

    ``hop_length``/``win_length`` are in milliseconds; the derived
    ``sample_*`` values are in samples at ``sampling_rate``.
    """
    super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
    # All of these must live on ``self`` — the other methods read them.
    self.feature_size = feature_size
    self.sampling_rate = sampling_rate
    self.padding_value = padding_value
    self.hop_length = hop_length
    self.win_length = win_length
    self.frame_signal_scale = frame_signal_scale
    self.preemphasis_coeff = preemphasis_coeff
    self.mel_floor = mel_floor
    self.normalize_means = normalize_means
    self.normalize_vars = normalize_vars
    self.win_function = win_function
    self.return_attention_mask = return_attention_mask

    self.sample_size = win_length * sampling_rate // 1000
    self.sample_stride = hop_length * sampling_rate // 1000

    self.n_fft = optimal_fft_length(self.sample_size)
    self.n_freqs = (self.n_fft // 2) + 1
def _extract_mfsc_features(self, one_waveform):
    """Compute log-mel filterbank features for one waveform; returns (frames, feature_size)."""
    if self.win_function == "hamming_window":
        # NOTE(review): the mangled source lost the keyword value; ``periodic=False``
        # matches the non-periodic Hamming window this branch exists to select — confirm.
        window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
    else:
        window = window_function(window_length=self.sample_size, name=self.win_function)

    fbanks = mel_filter_bank(
        num_frequency_bins=self.n_freqs,
        num_mel_filters=self.feature_size,
        min_frequency=0.0,
        max_frequency=self.sampling_rate / 2.0,
        sampling_rate=self.sampling_rate,
    )

    msfc_features = spectrogram(
        one_waveform * self.frame_signal_scale,
        window=window,
        frame_length=self.sample_size,
        hop_length=self.sample_stride,
        fft_length=self.n_fft,
        center=False,
        preemphasis=self.preemphasis_coeff,
        mel_filters=fbanks,
        mel_floor=self.mel_floor,
        log_mel="log",
    )
    return msfc_features.T
def _normalize_one(self, x, input_length, padding_value):
    """Mean/variance-normalize the first ``input_length`` frames of ``x``.

    Frames past ``input_length`` are overwritten with ``padding_value``.
    """
    # make sure we normalize float32 arrays
    if self.normalize_means:
        mean = x[:input_length].mean(axis=0)
        x = np.subtract(x, mean)
    if self.normalize_vars:
        std = x[:input_length].std(axis=0)
        x = np.divide(x, std)

    if input_length < x.shape[0]:
        x[input_length:] = padding_value

    # make sure array is in float32
    x = x.astype(np.float32)
    return x
def normalize(self, input_features, attention_mask=None):
    """Normalize each feature array, using the attention mask (or full length) as its valid length."""
    lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
    return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]
def __call__( self : List[str] , __UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[int] = None , **__UpperCAmelCase : List[str] , ) -> BatchFeature:
    """Featurize one or more raw waveforms into a padded ``BatchFeature``.

    NOTE(review): obfuscation collapsed every parameter to the same name (a
    SyntaxError) and rebinds one temporary throughout, so names read below
    (``raw_speech``, ``sampling_rate``, ``is_batched``, ``features``,
    ``padded_inputs``, ...) are unbound as written. The comments describe the
    intended flow; restore distinct names from upstream before running.
    """
    # Guard: refuse audio sampled at a rate other than the extractor's own.
    if sampling_rate is not None:
        if sampling_rate != self.sampling_rate:
            raise ValueError(
                F"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
                F""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
                F""" {self.sampling_rate} and not {sampling_rate}.""" )
    else:
        logger.warning(
            """It is strongly recommended to pass the ``sampling_rate`` argument to this function. """
            """Failing to do so can result in silent errors that might be hard to debug.""" )
    # A 2-D numpy array is treated as a batch of mono waveforms; 3-D+ would be
    # multi-channel audio, which is rejected below.
    SCREAMING_SNAKE_CASE__ = isinstance(__UpperCAmelCase , np.ndarray ) and len(raw_speech.shape ) > 1
    if is_batched_numpy and len(raw_speech.shape ) > 2:
        raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
    SCREAMING_SNAKE_CASE__ = is_batched_numpy or (
        isinstance(__UpperCAmelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
    )
    if is_batched:
        SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for speech in raw_speech]
    elif not is_batched and not isinstance(__UpperCAmelCase , np.ndarray ):
        SCREAMING_SNAKE_CASE__ = np.asarray(__UpperCAmelCase , dtype=np.floataa )
    elif isinstance(__UpperCAmelCase , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
        SCREAMING_SNAKE_CASE__ = raw_speech.astype(np.floataa )
    # always return batch
    if not is_batched:
        SCREAMING_SNAKE_CASE__ = [raw_speech]
    # extract fbank features
    SCREAMING_SNAKE_CASE__ = [self._extract_mfsc_features(__UpperCAmelCase ) for one_waveform in raw_speech]
    # convert into correct format for padding
    SCREAMING_SNAKE_CASE__ = BatchFeature({"""input_features""": features} )
    SCREAMING_SNAKE_CASE__ = self.pad(
        __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
    # make sure list is in array format
    SCREAMING_SNAKE_CASE__ = padded_inputs.get("""input_features""" )
    if isinstance(input_features[0] , __UpperCAmelCase ):
        SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase , dtype=np.floataa ) for feature in input_features]
    SCREAMING_SNAKE_CASE__ = padded_inputs.get("""attention_mask""" )
    if attention_mask is not None:
        SCREAMING_SNAKE_CASE__ = [np.asarray(__UpperCAmelCase , dtype=np.intaa ) for array in attention_mask]
    # Normalization needs the true (unpadded) lengths, so an attention mask is
    # only materialized when a real padding strategy is in effect.
    if self.normalize_means or self.normalize_vars:
        SCREAMING_SNAKE_CASE__ = (
            np.array(__UpperCAmelCase , dtype=np.intaa )
            if self._get_padding_strategies(__UpperCAmelCase , max_length=__UpperCAmelCase ) is not PaddingStrategy.DO_NOT_PAD
            and padding
            else None
        )
        SCREAMING_SNAKE_CASE__ = self.normalize(
            padded_inputs["""input_features"""] , attention_mask=__UpperCAmelCase )
    if return_tensors is not None:
        SCREAMING_SNAKE_CASE__ = padded_inputs.convert_to_tensors(__UpperCAmelCase )
    return padded_inputs
| 165 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Dict = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Tuple = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
A_ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 165 | 1 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
snake_case_ : str = logging.getLogger(__name__)
class lowercase__ ( lowercase ):
    # Distributed (torch.distributed) RAG retriever: the main worker owns the
    # index, gathers query embeddings from all workers, retrieves, then
    # scatters the results back.
    # NOTE(review): obfuscation replaced the original binding targets (e.g.
    # presumably ``self.process_group``, an ``os.environ[...]`` write) with
    # throwaway locals named ``_UpperCamelCase``; several names read below are
    # therefore unbound as written — restore from upstream before running.
    def __init__( self : Tuple ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Optional[int] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : int=None ):
        '''Initialize the base retriever; index construction is deferred to init_retrieval.'''
        super().__init__(
            lowerCamelCase__ ,question_encoder_tokenizer=lowerCamelCase__ ,generator_tokenizer=lowerCamelCase__ ,index=lowerCamelCase__ ,init_retrieval=lowerCamelCase__ ,)
        # presumably ``self.process_group = None`` upstream — binding lost to obfuscation
        _UpperCamelCase : Dict = None
    def UpperCamelCase_ ( self : str ,lowerCamelCase__ : int ):
        '''Create a gloo process group for retrieval and build the index on the main worker.'''
        logger.info('initializing retrieval' )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info('dist initialized' )
            # needs to be set manually
            _UpperCamelCase : Tuple = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            _UpperCamelCase : Optional[int] = str(distributed_port + 1 )
            _UpperCamelCase : Union[str, Any] = dist.new_group(ranks=lowerCamelCase__ ,backend='gloo' )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info('dist not initialized / main' )
            self.index.init_index()
        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def UpperCamelCase_ ( self : Any ):
        '''Return True iff this process is rank 0 of the retrieval group.'''
        return dist.get_rank(group=self.process_group ) == 0
    def UpperCamelCase_ ( self : Optional[int] ,lowerCamelCase__ : List[Any] ,lowerCamelCase__ : Optional[Any] ,lowerCamelCase__ : Dict=torch.floataa ):
        '''Receive this worker's slice of a tensor scattered from rank 0.'''
        _UpperCamelCase : Tuple = torch.empty(lowerCamelCase__ ,dtype=lowerCamelCase__ )
        dist.scatter(lowerCamelCase__ ,src=0 ,scatter_list=lowerCamelCase__ ,group=self.process_group )
        return target_tensor
    def UpperCamelCase_ ( self : List[Any] ):
        '''Guess the network interface name to use for the gloo group.'''
        _UpperCamelCase : Tuple = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        _UpperCamelCase : Optional[Any] = next((addr for addr in addrs if addr.startswith('e' )) ,lowerCamelCase__ )
        return ifname
    def UpperCamelCase_ ( self : Tuple ,lowerCamelCase__ : np.ndarray ,lowerCamelCase__ : int ):
        '''Retrieve documents for the given question embeddings (gather -> retrieve on rank 0 -> scatter).'''
        # single GPU training
        if not dist.is_initialized():
            _UpperCamelCase , _UpperCamelCase : List[Any] = self._main_retrieve(lowerCamelCase__ ,lowerCamelCase__ )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(lowerCamelCase__ )
        # distributed training
        _UpperCamelCase : List[Any] = dist.get_world_size(group=self.process_group )
        # gather logic
        _UpperCamelCase : str = None
        if self._is_main():
            _UpperCamelCase : List[Any] = [torch.empty(question_hidden_states.shape ,dtype=torch.floataa ) for _ in range(lowerCamelCase__ )]
        dist.gather(torch.tensor(lowerCamelCase__ ) ,dst=0 ,gather_list=lowerCamelCase__ ,group=self.process_group )
        # scatter logic
        _UpperCamelCase : Optional[int] = question_hidden_states.shape[0]
        _UpperCamelCase : List[str] = []
        _UpperCamelCase : Tuple = []
        if self._is_main():
            assert len(lowerCamelCase__ ) == world_size
            _UpperCamelCase , _UpperCamelCase : List[Any] = self._main_retrieve(torch.cat(lowerCamelCase__ ).numpy() ,lowerCamelCase__ )
            _UpperCamelCase , _UpperCamelCase : Tuple = torch.tensor(lowerCamelCase__ ), torch.tensor(lowerCamelCase__ )
            _UpperCamelCase : Any = self._chunk_tensor(lowerCamelCase__ ,lowerCamelCase__ )
            _UpperCamelCase : Optional[Any] = self._chunk_tensor(lowerCamelCase__ ,lowerCamelCase__ )
        _UpperCamelCase : Union[str, Any] = self._scattered(lowerCamelCase__ ,[n_queries, n_docs] ,target_type=torch.intaa )
        _UpperCamelCase : Union[str, Any] = self._scattered(lowerCamelCase__ ,[n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(lowerCamelCase__ )
| 236 |
'''Lazy import structure for the MVP model package.'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Maps submodule name -> public names it exports; consumed lazily by
# `_LazyModule` below so importing the package stays cheap.
_import_structure = {
    'configuration_mvp': ['MVP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MvpConfig', 'MvpOnnxConfig'],
    'tokenization_mvp': ['MvpTokenizer'],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): the obfuscated original bound these export lists to
    # throwaway names, so the `_import_structure` consumed by `_LazyModule`
    # was never defined; the canonical lazy-import wiring is restored here.
    _import_structure['tokenization_mvp_fast'] = ['MvpTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_mvp'] = [
        'MVP_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MvpForCausalLM',
        'MvpForConditionalGeneration',
        'MvpForQuestionAnswering',
        'MvpForSequenceClassification',
        'MvpModel',
        'MvpPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers import eagerly; this branch never runs at runtime.
    from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
    from .tokenization_mvp import MvpTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mvp_fast import MvpTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mvp import (
            MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
            MvpForCausalLM,
            MvpForConditionalGeneration,
            MvpForQuestionAnswering,
            MvpForSequenceClassification,
            MvpModel,
            MvpPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy; attributes resolve on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 236 | 1 |
'''simple docstring'''
import numpy
class UpperCamelCase__ :
    """A tiny fully-connected network: 3-feature input, two sigmoid hidden
    layers (4 and 3 units) and one sigmoid output, trained with hand-written
    backpropagation.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError), bound every value to a throwaway local (``__A``), and
    gave all five methods the same name. Attribute and method names are
    restored from the body's own reads (`self.input_array`, ...) and call
    sites (`self.feedforward()`, `neural_network.train(...)`, `.predict`).
    ``sigmoid`` / ``sigmoid_derivative`` are the module-level helpers (their
    defs were also renamed by obfuscation — restore those names as well).
    """

    def __init__( self , input_array : numpy.ndarray , output_array : numpy.ndarray ) -> None:
        """Store the training data and randomly initialize all weight matrices."""
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of nodes in the input layer;
        # the first hidden layer consists of 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # First hidden layer (4 nodes) -> second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(
            4 , 3 )
        # Second hidden layer (3 nodes) -> output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3 , 1 )
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; all zeroes until the first forward pass.
        self.predicted_output = numpy.zeros(output_array.shape )

    def feedforward( self ) -> numpy.ndarray:
        """Run one forward pass, caching each layer's activations on self."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer connecting the first hidden set of nodes with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation( self ) -> None:
        """One gradient step: compute weight deltas from the cached activations
        and add them to each weight matrix."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train( self , output : numpy.ndarray , iterations : int , give_loss : bool ) -> None:
        """Alternate forward and backward passes `iterations` times, optionally
        printing the mean squared loss each iteration."""
        for iteration in range(1 , iterations + 1 ):
            # presumably ``self.predicted_output`` upstream — it is the value
            # back_propagation reads; the obfuscated binding target was lost.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(F'Iteration {iteration} Loss: {loss}' )

    def predict( self , input_arr : numpy.ndarray ) -> int:
        """Forward-pass `input_arr` and threshold the output at 0.6."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def snake_case ( value )-> numpy.ndarray:
    """Return the logistic sigmoid 1 / (1 + e^-value), elementwise for arrays.

    NOTE(review): the obfuscated parameter name did not match the ``value``
    the body reads (a NameError); the name used by the body is restored.
    """
    return 1 / (1 + numpy.exp(-value ))
def snake_case ( value )-> numpy.ndarray:
    """Return the derivative of the sigmoid, expressed in terms of the sigmoid's
    own output: value * (1 - value).

    NOTE(review): the obfuscated parameter name did not match the ``value``
    the body reads (a NameError); the name used by the body is restored.
    """
    return (value) * (1 - (value))
def snake_case ( )-> int:
    """Train the two-hidden-layer network on a 3-bit parity table and predict
    the class of input [1, 1, 1].

    NOTE(review): obfuscation rebinds one throwaway local (``__A``) for every
    value and references names that are no longer bound in this file
    (``UpperCAmelCase``, ``TwoHiddenLayerNeuralNetwork``, ``neural_network``,
    ``numpy.floataa``) — restore distinct names from upstream before running.
    """
    # All 8 combinations of 3 binary input features.
    __A = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.floataa , )
    # True output values for the given input values.
    __A = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
    # Calling neural network class.
    __A = TwoHiddenLayerNeuralNetwork(
        input_array=UpperCAmelCase , output_array=UpperCAmelCase )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=UpperCAmelCase , iterations=1_0 , give_loss=UpperCAmelCase )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 161 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
    # Helper that builds XLMConfig instances and synthetic batched inputs for
    # the model tests below (the usual *ModelTester pattern).
    # NOTE(review): obfuscation collapsed every __init__ parameter to ``_A``
    # (duplicated — a SyntaxError) and rebinds ``__A`` for every value, so
    # attributes such as ``self.parent`` are never actually set as written;
    # the intended names are visible in the ``self.*`` reads below.
    def __init__( self :List[str] , _A :Tuple , _A :Optional[int]=13 , _A :List[Any]=7 , _A :Tuple=True , _A :Optional[Any]=True , _A :int=True , _A :Union[str, Any]=True , _A :Union[str, Any]=True , _A :Union[str, Any]=False , _A :int=False , _A :Any=False , _A :Tuple=2 , _A :Tuple=99 , _A :Union[str, Any]=0 , _A :Union[str, Any]=32 , _A :str=5 , _A :Optional[Any]=4 , _A :List[str]=0.1 , _A :List[Any]=0.1 , _A :Optional[Any]=512 , _A :Dict=2 , _A :Any=0.02 , _A :int=2 , _A :Dict=4 , _A :Optional[int]="last" , _A :str=True , _A :List[str]=None , _A :Optional[int]=0 , ) -> int:
        '''simple docstring'''
        __A = parent
        __A = batch_size
        __A = seq_length
        __A = is_training
        __A = use_input_lengths
        __A = use_token_type_ids
        __A = use_labels
        __A = gelu_activation
        __A = sinusoidal_embeddings
        __A = causal
        __A = asm
        __A = n_langs
        __A = vocab_size
        __A = n_special
        __A = hidden_size
        __A = num_hidden_layers
        __A = num_attention_heads
        __A = hidden_dropout_prob
        __A = attention_probs_dropout_prob
        __A = max_position_embeddings
        __A = type_sequence_label_size
        __A = initializer_range
        __A = num_labels
        __A = num_choices
        __A = summary_type
        __A = use_proj
        __A = scope
        __A = bos_token_id
    # Build a config plus a full set of random input tensors and labels.
    def lowercase_ ( self :int ) -> Tuple:
        '''simple docstring'''
        __A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __A = random_attention_mask([self.batch_size, self.seq_length] )
        __A = None
        if self.use_input_lengths:
            __A = (
                ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
            ) # small variation of seq_length
        __A = None
        if self.use_token_type_ids:
            __A = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
        __A = None
        __A = None
        __A = None
        if self.use_labels:
            __A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __A = ids_tensor([self.batch_size] , 2 ).float()
            __A = ids_tensor([self.batch_size] , self.num_choices )
        __A = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def lowercase_ ( self :Union[str, Any] ) -> List[Any]:
        '''simple docstring'''
        return XLMConfig(
            vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
    # Per-head checks: build the model, run it, assert output shapes.
    def lowercase_ ( self :str , _A :Optional[int] , _A :Dict , _A :Union[str, Any] , _A :List[Any] , _A :str , _A :Union[str, Any] , _A :Optional[Any] , _A :List[str] , _A :Dict , ) -> Any:
        '''simple docstring'''
        __A = XLMModel(config=_A )
        model.to(_A )
        model.eval()
        __A = model(_A , lengths=_A , langs=_A )
        __A = model(_A , langs=_A )
        __A = model(_A )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def lowercase_ ( self :int , _A :List[Any] , _A :List[str] , _A :List[Any] , _A :int , _A :Optional[int] , _A :Optional[Any] , _A :Dict , _A :List[Any] , _A :List[Any] , ) -> List[Any]:
        '''simple docstring'''
        __A = XLMWithLMHeadModel(_A )
        model.to(_A )
        model.eval()
        __A = model(_A , token_type_ids=_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def lowercase_ ( self :Union[str, Any] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :str , _A :Any , _A :Dict , _A :Any , _A :Union[str, Any] , _A :Optional[Any] , ) -> int:
        '''simple docstring'''
        __A = XLMForQuestionAnsweringSimple(_A )
        model.to(_A )
        model.eval()
        __A = model(_A )
        __A = model(_A , start_positions=_A , end_positions=_A )
        __A = outputs
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def lowercase_ ( self :Union[str, Any] , _A :Any , _A :Union[str, Any] , _A :str , _A :Dict , _A :Optional[Any] , _A :Union[str, Any] , _A :List[str] , _A :str , _A :Optional[Any] , ) -> int:
        '''simple docstring'''
        __A = XLMForQuestionAnswering(_A )
        model.to(_A )
        model.eval()
        __A = model(_A )
        __A = model(
            _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
        __A = model(
            _A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
        ((__A) , ) = result_with_labels.to_tuple()
        __A = model(_A , start_positions=_A , end_positions=_A )
        ((__A) , ) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape , () )
        self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
        self.parent.assertEqual(
            result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(
            result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
        self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def lowercase_ ( self :Optional[int] , _A :Optional[Any] , _A :Optional[int] , _A :List[Any] , _A :int , _A :Tuple , _A :Union[str, Any] , _A :List[Any] , _A :List[str] , _A :Dict , ) -> str:
        '''simple docstring'''
        __A = XLMForSequenceClassification(_A )
        model.to(_A )
        model.eval()
        __A = model(_A )
        __A = model(_A , labels=_A )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def lowercase_ ( self :Optional[int] , _A :str , _A :List[str] , _A :Union[str, Any] , _A :Dict , _A :int , _A :Dict , _A :Union[str, Any] , _A :int , _A :Optional[Any] , ) -> List[str]:
        '''simple docstring'''
        __A = self.num_labels
        __A = XLMForTokenClassification(_A )
        model.to(_A )
        model.eval()
        __A = model(_A , attention_mask=_A , labels=_A )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def lowercase_ ( self :List[str] , _A :Optional[Any] , _A :List[str] , _A :List[Any] , _A :Union[str, Any] , _A :Any , _A :List[str] , _A :Optional[Any] , _A :Any , _A :Tuple , ) -> List[Any]:
        '''simple docstring'''
        __A = self.num_choices
        __A = XLMForMultipleChoice(config=_A )
        model.to(_A )
        model.eval()
        __A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __A = model(
            _A , attention_mask=_A , token_type_ids=_A , labels=_A , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Repackage prepare_config_and_inputs output into the (config, dict) shape
    # that the common test mixin expects.
    def lowercase_ ( self :Union[str, Any] ) -> Dict:
        '''simple docstring'''
        __A = self.prepare_config_and_inputs()
        (
            (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) , (
                __A
            ) ,
        ) = config_and_inputs
        __A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase):
    # Common XLM model test suite driven by the tester class above.
    # NOTE(review): obfuscation replaced the mixin base names (presumably
    # ModelTesterMixin / GenerationTesterMixin / PipelineTesterMixin from the
    # imports) with the unbound ``SCREAMING_SNAKE_CASE``, and rebinds ``__A``
    # for every value, so several names read below are unbound as written.
    UpperCAmelCase__ : Optional[int] = (
        (
            XLMModel,
            XLMWithLMHeadModel,
            XLMForQuestionAnswering,
            XLMForSequenceClassification,
            XLMForQuestionAnsweringSimple,
            XLMForTokenClassification,
            XLMForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    UpperCAmelCase__ : Dict = (
        (XLMWithLMHeadModel,) if is_torch_available() else ()
    ) # TODO (PVP): Check other models whether language generation is also applicable
    UpperCAmelCase__ : List[Any] = (
        {
            'feature-extraction': XLMModel,
            'fill-mask': XLMWithLMHeadModel,
            'question-answering': XLMForQuestionAnsweringSimple,
            'text-classification': XLMForSequenceClassification,
            'text-generation': XLMWithLMHeadModel,
            'token-classification': XLMForTokenClassification,
            'zero-shot': XLMForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Skip hook for the pipeline tests (slow-tokenizer QA pipelines are flaky).
    def lowercase_ ( self :int , _A :int , _A :Optional[Any] , _A :Dict , _A :List[Any] , _A :str ) -> str:
        '''simple docstring'''
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast' )
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    # Add the extra label tensors XLMForQuestionAnswering needs.
    def lowercase_ ( self :int , _A :Optional[Any] , _A :Dict , _A :Optional[int]=False ) -> List[Any]:
        '''simple docstring'''
        __A = super()._prepare_for_class(_A , _A , return_labels=_A )
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                __A = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
                __A = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_A )
        return inputs_dict
    def lowercase_ ( self :Optional[int] ) -> Any:
        '''simple docstring'''
        __A = XLMModelTester(self )
        __A = ConfigTester(self , config_class=_A , emb_dim=37 )
    def lowercase_ ( self :Dict ) -> List[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def lowercase_ ( self :List[Any] ) -> Any:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*_A )
    def lowercase_ ( self :str ) -> List[Any]:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*_A )
    def lowercase_ ( self :Any ) -> Tuple:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*_A )
    def lowercase_ ( self :str ) -> str:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*_A )
    def lowercase_ ( self :List[Any] ) -> Optional[int]:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*_A )
    def lowercase_ ( self :List[str] ) -> Optional[Any]:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*_A )
    def lowercase_ ( self :Any ) -> Union[str, Any]:
        '''simple docstring'''
        __A = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
    # Generation-test helper: verify attention shapes at every decode step.
    def lowercase_ ( self :Any , _A :str , _A :str , _A :int , _A :Optional[int] , _A :Any , _A :List[Any]=False , _A :Dict=1 ) -> Optional[int]:
        '''simple docstring'''
        self.assertIsInstance(_A , _A )
        self.assertListEqual(
            [isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
        self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(_A ):
            # adds PAD dummy token
            __A = min_length + idx + 1
            __A = min_length + idx + 1
            __A = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
    # Generation-test helper: verify hidden-state shapes at every decode step.
    def lowercase_ ( self :Optional[Any] , _A :str , _A :List[Any] , _A :str , _A :str , _A :int , _A :Union[str, Any]=False , _A :Optional[Any]=1 ) -> Dict:
        '''simple docstring'''
        self.assertIsInstance(_A , _A )
        self.assertListEqual(
            [isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
        self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(_A ):
            # adds PAD dummy token
            __A = min_length + idx + 1
            __A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
        pass
    @slow
    def lowercase_ ( self :int ) -> Tuple:
        '''simple docstring'''
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __A = XLMModel.from_pretrained(_A )
            self.assertIsNotNone(_A )
@require_torch
class UpperCamelCase__ ( unittest.TestCase):
    # Slow integration test: greedy generation with the pretrained
    # xlm-mlm-en-2048 checkpoint (downloads weights, hence @slow).
    # NOTE(review): obfuscation rebinds ``__A`` for every value, so ``model``
    # and ``output_ids`` read below are unbound as written.
    @slow
    def lowercase_ ( self :int ) -> str:
        '''simple docstring'''
        __A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
        model.to(_A )
        __A = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
        __A = [
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
            14,
            447,
        ] # the president the president the president the president the president the president the president the president the president the president
        # TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        __A = model.generate(_A , do_sample=_A )
        self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
| 161 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger for this configuration file.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# Checkpoint name -> hosted config URL.
# NOTE(review): this rebinds the same obfuscated name as the logger above
# (distinct names upstream — presumably `logger` and an archive map).
SCREAMING_SNAKE_CASE_ = {
    """abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class UpperCamelCase__ ( lowerCAmelCase_ ):
    '''Configuration for GPT-NeoX Japanese models: stores the architecture
    hyper-parameters and forwards the token ids to the base config class.'''

    # presumably the `model_type` class attribute upstream — name lost to obfuscation
    __snake_case : Union[str, Any] = "gpt_neox_japanese"

    def __init__(
        self ,
        vocab_size=32000 ,
        hidden_size=2560 ,
        num_hidden_layers=32 ,
        num_attention_heads=32 ,
        intermediate_multiple_size=4 ,
        hidden_act="gelu" ,
        rotary_pct=1.00 ,
        rotary_emb_base=10000 ,
        max_position_embeddings=2048 ,
        initializer_range=0.02 ,
        layer_norm_eps=1e-5 ,
        use_cache=True ,
        bos_token_id=31996 ,
        eos_token_id=31999 ,
        attention_dropout=0.1 ,
        hidden_dropout=0.0 ,
        **kwargs ,
    ) -> None:
        """Store every hyper-parameter on self and pass the token ids up.

        NOTE(review): the obfuscated signature declared every parameter with
        the same name (a SyntaxError) and bound values to one throwaway local;
        parameter names are restored from the assignment order, the default
        values, and the ``super().__init__`` keyword arguments.
        """
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 193 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
    '''Mock SageMakerConfig used by the nargs-conversion test below.'''
    # NOTE(review): obfuscation collapsed every distinct field name to the
    # same ``__snake_case`` (later annotated assignments simply overwrite the
    # earlier ones), so only the last list survives. The test class reads
    # ``success_training_script_args`` / ``fail_training_script_args`` —
    # presumably the original names of the two argument lists; restore all
    # field names from upstream before running.
    __snake_case : int = ComputeEnvironment.AMAZON_SAGEMAKER
    __snake_case : List[Any] = True
    __snake_case : Optional[int] = "ml.p3.2xlarge"
    __snake_case : List[str] = "accelerate_sagemaker_execution_role"
    __snake_case : Tuple = "hf-sm"
    __snake_case : Any = "us-east-1"
    __snake_case : Union[str, Any] = 1
    __snake_case : Dict = "accelerate-sagemaker-1"
    __snake_case : Tuple = "1.6"
    __snake_case : List[str] = "4.4"
    __snake_case : str = "train.py"
    # CLI args where every flag has an explicit value (should convert cleanly).
    __snake_case : List[str] = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    # CLI args mixing store-true flags with valued flags (should be rejected).
    __snake_case : Optional[int] = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class UpperCamelCase__ ( unittest.TestCase ):
    """Unit tests for ``_convert_nargs_to_dict``.

    NOTE(review): the mock config defined above is read here as
    ``MockLaunchConfig``; the dataclass's mangled name must be restored for
    these tests to find it — confirm against the upstream file.
    """

    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
        """Well-formed argv converts with native Python types; malformed argv raises."""
        # Bind the result to the name the assertions below actually read
        # (the original bound it to an unrelated mangled name).
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        # Each value should be parsed to its natural type, not kept as a string.
        assert isinstance(converted_args["model_name_or_path"] , str )
        assert isinstance(converted_args["do_train"] , bool )
        assert isinstance(converted_args["epochs"] , int )
        assert isinstance(converted_args["learning_rate"] , float )
        assert isinstance(converted_args["max_steps"] , float )
        # Bare flags mixed with valued flags are ambiguous and must be rejected.
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 193 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase :
    """Test harness that builds Mask2Former configs/inputs and checks model outputs.

    NOTE(review): identifiers in this chunk look machine-mangled.  All five
    methods below are defined under the same name ``snake_case`` (only the last
    definition survives on the class), ``__init__`` declares every parameter as
    ``__lowercase`` (duplicate argument names are a SyntaxError), and method
    bodies read names (``parent``, ``config``, ``pixel_values`` …) that are
    never bound.  Indentation was reconstructed from syntax; confirm against
    the upstream Mask2Former test file.
    """

    def __init__( self : Dict , __lowercase : str , __lowercase : List[str]=2 , __lowercase : Optional[Any]=True , __lowercase : List[Any]=False , __lowercase : Tuple=10 , __lowercase : List[str]=3 , __lowercase : int=32 * 8 , __lowercase : Dict=32 * 8 , __lowercase : Any=4 , __lowercase : List[Any]=64 , ):
        """Store tester hyperparameters (batch size, image sizes, query/label counts)."""
        __lowercase =parent
        __lowercase =batch_size
        __lowercase =is_training
        __lowercase =use_auxiliary_loss
        __lowercase =num_queries
        __lowercase =num_channels
        __lowercase =min_size
        __lowercase =max_size
        __lowercase =num_labels
        __lowercase =hidden_dim
        __lowercase =hidden_dim

    def snake_case ( self : List[Any] ):
        """Build random pixel values, pixel mask, mask/class labels and a config."""
        __lowercase =floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            __lowercase )
        __lowercase =torch.ones([self.batch_size, self.min_size, self.max_size] , device=__lowercase )
        # Random binary masks / labels thresholded at 0.5.
        __lowercase =(
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__lowercase ) > 0.5
        ).float()
        __lowercase =(torch.rand((self.batch_size, self.num_labels) , device=__lowercase ) > 0.5).long()
        __lowercase =self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def snake_case ( self : Dict ):
        """Build a small MaskaFormerConfig sized from the tester attributes."""
        __lowercase =MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        __lowercase =self.num_queries
        __lowercase =self.num_labels
        __lowercase =[1, 1, 1, 1]
        __lowercase =self.num_channels
        __lowercase =64
        __lowercase =128
        __lowercase =self.hidden_dim
        __lowercase =self.hidden_dim
        __lowercase =self.hidden_dim
        return config

    def snake_case ( self : List[Any] ):
        """Return (config, inputs_dict) for the common model tests."""
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase =self.prepare_config_and_inputs()
        __lowercase ={'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict

    def snake_case ( self : List[str] , __lowercase : Any , __lowercase : Tuple ):
        """Check the three hidden-state tuples have the expected lengths."""
        __lowercase =output.encoder_hidden_states
        __lowercase =output.pixel_decoder_hidden_states
        __lowercase =output.transformer_decoder_hidden_states
        # NOTE(review): assertTrue(len(x), y) treats the second argument as a
        # message and only checks len(x) is truthy — presumably assertEqual
        # was intended; confirm.
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(__lowercase ) , config.decoder_layers )

    def snake_case ( self : Tuple , __lowercase : Tuple , __lowercase : int , __lowercase : List[Any] , __lowercase : Dict=False ):
        """Run MaskaFormerModel forward and check output shapes and presence."""
        with torch.no_grad():
            __lowercase =MaskaFormerModel(config=__lowercase )
            model.to(__lowercase )
            model.eval()
            __lowercase =model(pixel_values=__lowercase , pixel_mask=__lowercase )
            __lowercase =model(__lowercase , output_hidden_states=__lowercase )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(__lowercase , __lowercase )

    def snake_case ( self : List[str] , __lowercase : List[str] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : List[Any] , __lowercase : List[Any] ):
        """Run the universal-segmentation head with and without labels; check logits and loss."""
        __lowercase =MaskaFormerForUniversalSegmentation(config=__lowercase )
        model.to(__lowercase )
        model.eval()

        def comm_check_on_output(__lowercase : int ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            __lowercase =model(pixel_values=__lowercase , pixel_mask=__lowercase )
            __lowercase =model(__lowercase )
            comm_check_on_output(__lowercase )
        __lowercase =model(
            pixel_values=__lowercase , pixel_mask=__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        comm_check_on_output(__lowercase )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase ( A , A , unittest.TestCase ):
    """Common model tests for Mask2Former.

    NOTE(review): machine-mangled — the class lists the undefined base ``A``
    twice (duplicate bases raise TypeError at class creation), the six class
    attributes all rebind ``lowerCAmelCase_`` (only the last survives), and
    every test method is defined under the same name ``snake_case``.
    Indentation was reconstructed from syntax.
    """

    lowerCAmelCase_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    lowerCAmelCase_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False
    lowerCAmelCase_ = False

    def snake_case ( self : Any ):
        """Set up the model tester and config tester."""
        __lowercase =MaskaFormerModelTester(self )
        __lowercase =ConfigTester(self , config_class=__lowercase , has_text_modality=__lowercase )

    def snake_case ( self : List[Any] ):
        """Run the shared configuration tests."""
        self.config_tester.run_common_tests()

    def snake_case ( self : Any ):
        """Check the base model with hidden-state output enabled."""
        __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def snake_case ( self : Union[str, Any] ):
        """Check the instance-segmentation head."""
        __lowercase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__lowercase )

    @unittest.skip(reason='Mask2Former does not use inputs_embeds' )
    def snake_case ( self : List[Any] ):
        """Skipped: not applicable to Mask2Former."""
        pass

    @unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
    def snake_case ( self : List[Any] ):
        """Skipped: not applicable to Mask2Former."""
        pass

    @unittest.skip(reason='Mask2Former is not a generative model' )
    def snake_case ( self : List[str] ):
        """Skipped: not applicable to Mask2Former."""
        pass

    @unittest.skip(reason='Mask2Former does not use token embeddings' )
    def snake_case ( self : Union[str, Any] ):
        """Skipped: not applicable to Mask2Former."""
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
    def snake_case ( self : int ):
        """Skipped: incompatible with nn.DataParallel."""
        pass

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def snake_case ( self : Tuple ):
        """Skipped pending a smaller test model."""
        pass

    def snake_case ( self : List[Any] ):
        """Check the forward signature starts with pixel_values."""
        __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase =model_class(__lowercase )
            __lowercase =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowercase =[*signature.parameters.keys()]
            __lowercase =['pixel_values']
            self.assertListEqual(arg_names[:1] , __lowercase )

    @slow
    def snake_case ( self : Tuple ):
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            __lowercase =MaskaFormerModel.from_pretrained(__lowercase )
            self.assertIsNotNone(__lowercase )

    def snake_case ( self : Dict ):
        """A forward pass with labels must produce a loss."""
        __lowercase =(self.model_tester.min_size,) * 2
        __lowercase ={
            'pixel_values': torch.randn((2, 3, *size) , device=__lowercase ),
            'mask_labels': torch.randn((2, 10, *size) , device=__lowercase ),
            'class_labels': torch.zeros(2 , 10 , device=__lowercase ).long(),
        }
        __lowercase =self.model_tester.get_config()
        __lowercase =MaskaFormerForUniversalSegmentation(__lowercase ).to(__lowercase )
        __lowercase =model(**__lowercase )
        self.assertTrue(outputs.loss is not None )

    def snake_case ( self : str ):
        """Check the base model with hidden-state output enabled."""
        __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(__lowercase , **__lowercase , output_hidden_states=__lowercase )

    def snake_case ( self : Optional[int] ):
        """Attention outputs must be returned when requested."""
        __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowercase =model_class(__lowercase ).to(__lowercase )
            __lowercase =model(**__lowercase , output_attentions=__lowercase )
            self.assertTrue(outputs.attentions is not None )

    def snake_case ( self : Tuple ):
        """Training forward/backward on the segmentation head."""
        if not self.model_tester.is_training:
            return
        # only MaskaFormerForUniversalSegmentation has the loss
        __lowercase =self.all_model_classes[1]
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs()
        __lowercase =model_class(__lowercase )
        model.to(__lowercase )
        model.train()
        __lowercase =model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase ).loss
        loss.backward()

    def snake_case ( self : Dict ):
        """Gradients must flow back to hidden states and attentions."""
        __lowercase =self.all_model_classes[1]
        __lowercase , __lowercase , __lowercase , __lowercase , __lowercase =self.model_tester.prepare_config_and_inputs()
        __lowercase =True
        __lowercase =True
        __lowercase =model_class(__lowercase ).to(__lowercase )
        model.train()
        __lowercase =model(__lowercase , mask_labels=__lowercase , class_labels=__lowercase )
        __lowercase =outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        __lowercase =outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        __lowercase =outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        __lowercase =outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=__lowercase )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance for the torch.allclose checks in the integration tests
# below (mangled name; presumably TOLERANCE in the upstream file).
UpperCAmelCase = 1E-4
def __UpperCamelCase ( ):
    """Load the COCO test-fixture image used by the integration tests below.

    Returns the PIL image loaded from the repository's test fixtures.  The
    original bound the opened image to a mangled throwaway name and then
    returned the undefined name ``image`` (NameError); bind and return the
    same name instead.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class lowerCAmelCase ( unittest.TestCase ):
    """Slow integration tests against the pretrained Mask2Former checkpoint.

    NOTE(review): machine-mangled — all methods share the name ``snake_case``
    (only the last definition survives, and the two ``@cached_property``
    getters collide), and bodies read names (``model``, ``inputs``,
    ``inputs_shape`` …) that are never bound.  Indentation was reconstructed
    from syntax.
    """

    @cached_property
    def snake_case ( self : Tuple ):
        """Checkpoint id used by these tests."""
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def snake_case ( self : str ):
        """Image processor for the checkpoint (None without vision deps)."""
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def snake_case ( self : Optional[Any] ):
        """Base-model forward: compare hidden states to reference values."""
        __lowercase =MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__lowercase )
        __lowercase =self.default_image_processor
        __lowercase =prepare_img()
        __lowercase =image_processor(__lowercase , return_tensors='pt' ).to(__lowercase )
        __lowercase =inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 384, 384) )
        with torch.no_grad():
            __lowercase =model(**__lowercase )
        __lowercase =torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        __lowercase =torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        __lowercase =torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(__lowercase )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def snake_case ( self : List[str] ):
        """Segmentation-head forward: compare mask/class logits to reference values."""
        __lowercase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowercase ).eval()
        __lowercase =self.default_image_processor
        __lowercase =prepare_img()
        __lowercase =image_processor(__lowercase , return_tensors='pt' ).to(__lowercase )
        __lowercase =inputs['pixel_values'].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(__lowercase , (1, 3, 384, 384) )
        with torch.no_grad():
            __lowercase =model(**__lowercase )
        # masks_queries_logits
        __lowercase =outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        __lowercase =[
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        __lowercase =torch.tensor(__lowercase ).to(__lowercase )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __lowercase , atol=__lowercase ) )
        # class_queries_logits
        __lowercase =outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        __lowercase =torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(__lowercase )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __lowercase , atol=__lowercase ) )

    def snake_case ( self : int ):
        """A forward pass with segmentation maps and labels must produce a loss."""
        __lowercase =MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__lowercase ).eval()
        __lowercase =self.default_image_processor
        __lowercase =image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='pt' , )
        __lowercase =inputs['pixel_values'].to(__lowercase )
        __lowercase =[el.to(__lowercase ) for el in inputs['mask_labels']]
        __lowercase =[el.to(__lowercase ) for el in inputs['class_labels']]
        with torch.no_grad():
            __lowercase =model(**__lowercase )
        self.assertTrue(outputs.loss is not None )
| 141 |
'''simple docstring'''
def __UpperCamelCase ( input_str: str, use_pascal: bool = False ):
    """Convert a snake_case string to camelCase (default) or PascalCase.

    The original (mangled) version declared both parameters under the same name
    (a SyntaxError), called ``isinstance`` with the value as its own type, and
    crashed with IndexError on empty segments produced by consecutive, leading
    or trailing underscores; parameter names were restored from the body's own
    references.

    Args:
        input_str: the snake_case string to convert.
        use_pascal: when True, capitalize the first word too (PascalCase).

    Returns:
        The converted identifier as a single string.

    Raises:
        ValueError: if ``input_str`` is not a str or ``use_pascal`` is not a bool.
    """
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)
    words = input_str.split("_")
    # camelCase keeps the first word lowercase; PascalCase capitalizes it too.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    # Filter out empty segments (from "__", leading/trailing "_") so word[0]
    # never indexes an empty string.
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize if word]
    initial_word = "" if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 141 | 1 |
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class _a ( UpperCamelCase__ ):
    """Config-tester mixin that checks Levit-specific attributes exist.

    NOTE(review): the base-class name looks machine-mangled (presumably the
    shared ConfigTester helper) — confirm against the upstream file.
    """

    def lowerCamelCase_ ( self: str ) -> Union[str, Any]:
        """Build a config from ``self.inputs_dict`` and check Levit attributes.

        The original bound the built config to a mangled throwaway name and
        then called ``hasattr`` on an undefined name; bind and check the same
        object instead.
        """
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , '''hidden_sizes''' ) )
        self.parent.assertTrue(hasattr(config , '''num_attention_heads''' ) )
class _a :
    """Test harness that builds Levit configs and random inputs.

    NOTE(review): machine-mangled — ``__init__`` declares every parameter as
    ``UpperCamelCase_`` (duplicate argument names are a SyntaxError), all
    methods share the name ``lowerCamelCase_`` (only the last survives), and
    bodies read names that are never bound.  Indentation was reconstructed
    from syntax.
    """

    def __init__( self: Dict , UpperCamelCase_: List[Any] , UpperCamelCase_: str=13 , UpperCamelCase_: Optional[Any]=64 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Dict=3 , UpperCamelCase_: Optional[int]=2 , UpperCamelCase_: Any=1 , UpperCamelCase_: List[str]=16 , UpperCamelCase_: List[str]=[128, 256, 384] , UpperCamelCase_: Optional[int]=[4, 6, 8] , UpperCamelCase_: Dict=[2, 3, 4] , UpperCamelCase_: List[Any]=[16, 16, 16] , UpperCamelCase_: Union[str, Any]=0 , UpperCamelCase_: List[str]=[2, 2, 2] , UpperCamelCase_: int=[2, 2, 2] , UpperCamelCase_: Tuple=0.02 , UpperCamelCase_: List[str]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: Union[str, Any]=2 , ) -> List[Any]:
        """Store tester hyperparameters and derive the down-sampling ops."""
        lowercase__ = parent
        lowercase__ = batch_size
        lowercase__ = image_size
        lowercase__ = num_channels
        lowercase__ = kernel_size
        lowercase__ = stride
        lowercase__ = padding
        lowercase__ = hidden_sizes
        lowercase__ = num_attention_heads
        lowercase__ = depths
        lowercase__ = key_dim
        lowercase__ = drop_path_rate
        lowercase__ = patch_size
        lowercase__ = attention_ratio
        lowercase__ = mlp_ratio
        lowercase__ = initializer_range
        lowercase__ = [
            ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]
        lowercase__ = is_training
        lowercase__ = use_labels
        lowercase__ = num_labels
        lowercase__ = initializer_range

    def lowerCamelCase_ ( self: str ) -> Union[str, Any]:
        """Build random pixel values (and labels when enabled) plus a config."""
        lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase__ = None
        if self.use_labels:
            lowercase__ = ids_tensor([self.batch_size] , self.num_labels )
        lowercase__ = self.get_config()
        return config, pixel_values, labels

    def lowerCamelCase_ ( self: Tuple ) -> Optional[Any]:
        """Build a LevitConfig from the tester attributes."""
        return LevitConfig(
            image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: List[str] ) -> Union[str, Any]:
        """Run LevitModel and check the hidden-state shape after four stride-2 convs."""
        lowercase__ = LevitModel(config=UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowercase__ = model(UpperCamelCase_ )
        lowercase__ = (self.image_size, self.image_size)
        lowercase__ , lowercase__ = image_size[0], image_size[1]
        # Apply the conv output-size formula once per patch-embedding conv.
        for _ in range(4 ):
            lowercase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
            lowercase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )

    def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: str , UpperCamelCase_: List[str] ) -> str:
        """Run LevitForImageClassification with labels and check logits shape."""
        lowercase__ = self.num_labels
        lowercase__ = LevitForImageClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        lowercase__ = model(UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def lowerCamelCase_ ( self: str ) -> List[Any]:
        """Return (config, inputs_dict) for the common model tests."""
        lowercase__ = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ = config_and_inputs
        lowercase__ = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class _a ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
    """Common model tests for Levit.

    NOTE(review): machine-mangled — the same base name is listed twice
    (duplicate bases raise TypeError at class creation), the class attributes
    all rebind ``_lowercase``, and every test method is defined under the same
    name ``lowerCamelCase_``.  Indentation was reconstructed from syntax.
    """

    _lowercase : str = (
        (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
        if is_torch_available()
        else ()
    )
    _lowercase : Tuple = (
        {
            '''feature-extraction''': LevitModel,
            '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )
    _lowercase : Any = False
    _lowercase : Union[str, Any] = False
    _lowercase : Tuple = False
    _lowercase : Dict = False
    _lowercase : Any = False

    def lowerCamelCase_ ( self: int ) -> Union[str, Any]:
        """Set up the model tester and config tester."""
        lowercase__ = LevitModelTester(self )
        lowercase__ = ConfigTester(self , config_class=UpperCamelCase_ , has_text_modality=UpperCamelCase_ , hidden_size=37 )

    def lowerCamelCase_ ( self: Optional[int] ) -> int:
        """Run the full battery of configuration tests."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def lowerCamelCase_ ( self: Any ) -> Dict:
        """Intentionally empty: common config properties are covered elsewhere."""
        return

    @unittest.skip(reason='''Levit does not use inputs_embeds''' )
    def lowerCamelCase_ ( self: List[Any] ) -> Dict:
        """Skipped: not applicable to Levit."""
        pass

    @unittest.skip(reason='''Levit does not support input and output embeddings''' )
    def lowerCamelCase_ ( self: Dict ) -> Optional[int]:
        """Skipped: not applicable to Levit."""
        pass

    @unittest.skip(reason='''Levit does not output attentions''' )
    def lowerCamelCase_ ( self: Optional[int] ) -> Union[str, Any]:
        """Skipped: not applicable to Levit."""
        pass

    def lowerCamelCase_ ( self: Tuple ) -> Optional[int]:
        """Check the forward signature starts with pixel_values."""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = model_class(UpperCamelCase_ )
            lowercase__ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase__ = [*signature.parameters.keys()]
            lowercase__ = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCamelCase_ )

    def lowerCamelCase_ ( self: Optional[int] ) -> Tuple:
        """Check hidden-state outputs, both via kwarg and via config flag."""

        def check_hidden_states_output(UpperCamelCase_: int , UpperCamelCase_: Any , UpperCamelCase_: str ):
            lowercase__ = model_class(UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.eval()
            with torch.no_grad():
                lowercase__ = model(**self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ ) )
            lowercase__ = outputs.hidden_states
            lowercase__ = len(self.model_tester.depths ) + 1
            self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
            lowercase__ = (self.model_tester.image_size, self.model_tester.image_size)
            lowercase__ , lowercase__ = image_size[0], image_size[1]
            # Apply the conv output-size formula once per patch-embedding conv.
            for _ in range(4 ):
                lowercase__ = floor(
                    (
                        (height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
                lowercase__ = floor(
                    (
                        (width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
                        / self.model_tester.stride
                    )
                    + 1 )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [
                    height * width,
                    self.model_tester.hidden_sizes[0],
                ] , )

        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase__ = True
            check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase__ = True
            check_hidden_states_output(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def lowerCamelCase_ ( self: int ) -> Optional[Any]:
        """Skipped pending a smaller test model."""
        pass

    def lowerCamelCase_ ( self: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Any , UpperCamelCase_: Optional[int]=False ) -> Optional[int]:
        """Prepare model inputs; the teacher-distilled head takes no labels."""
        lowercase__ = super()._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
        if return_labels:
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict

    def lowerCamelCase_ ( self: Any ) -> int:
        """Check the base model."""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def lowerCamelCase_ ( self: Tuple ) -> List[Any]:
        """Check the image-classification head."""
        lowercase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*UpperCamelCase_ )

    def lowerCamelCase_ ( self: Union[str, Any] ) -> Tuple:
        """Training forward/backward for every trainable head."""
        if not self.model_tester.is_training:
            return
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = True
        for model_class in self.all_model_classes:
            # LevitForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(UpperCamelCase_ )
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            lowercase__ = model_class(UpperCamelCase_ )
            model.to(UpperCamelCase_ )
            model.train()
            lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
            lowercase__ = model(**UpperCamelCase_ ).loss
            loss.backward()

    def lowerCamelCase_ ( self: Optional[Any] ) -> List[Any]:
        """Training forward/backward with gradient checkpointing enabled."""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        lowercase__ = False
        lowercase__ = True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCamelCase_ ) or not model_class.supports_gradient_checkpointing:
                continue
            # LevitForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "LevitForImageClassificationWithTeacher":
                continue
            lowercase__ = model_class(UpperCamelCase_ )
            model.gradient_checkpointing_enable()
            model.to(UpperCamelCase_ )
            model.train()
            lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
            lowercase__ = model(**UpperCamelCase_ ).loss
            loss.backward()

    def lowerCamelCase_ ( self: List[Any] ) -> List[Any]:
        """Exercise every sequence-classification problem type without warnings."""
        lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ = [
            {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float},
            {'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long},
            {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(UpperCamelCase_ ),
                ]
                or model_class.__name__ == "LevitForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
                    lowercase__ = problem_type['''title''']
                    lowercase__ = problem_type['''num_labels''']
                    lowercase__ = model_class(UpperCamelCase_ )
                    model.to(UpperCamelCase_ )
                    model.train()
                    lowercase__ = self._prepare_for_class(UpperCamelCase_ , UpperCamelCase_ , return_labels=UpperCamelCase_ )
                    if problem_type["num_labels"] > 1:
                        lowercase__ = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] )
                    lowercase__ = inputs['''labels'''].to(problem_type['''dtype'''] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=UpperCamelCase_ ) as warning_list:
                        lowercase__ = model(**UpperCamelCase_ ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                f'Something is going wrong in the regression problem: intercepted {w.message}' )
                    loss.backward()

    @slow
    def lowerCamelCase_ ( self: Dict ) -> str:
        """Smoke-test loading a pretrained checkpoint."""
        for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase__ = LevitModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _a ( ):
    """Load the COCO test-fixture image used by the integration test below.

    NOTE(review): this module-level function rebinds the name ``_a``, shadowing
    the classes of the same (mangled) name defined above — presumably it was
    named ``prepare_img`` originally; confirm.  The original body bound the
    opened image to a mangled throwaway name and returned the undefined name
    ``image`` (NameError); bind and return the same name instead.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
    """Slow integration test against the pretrained Levit checkpoint.

    NOTE(review): machine-mangled — both methods share the name
    ``lowerCamelCase_`` (only the last survives) and bodies read names
    (``model``, ``inputs``, ``outputs`` …) that are never bound.  Indentation
    was reconstructed from syntax.
    """

    @cached_property
    def lowerCamelCase_ ( self: Any ) -> Dict:
        """Image processor for the first archived Levit checkpoint."""
        return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )

    @slow
    def lowerCamelCase_ ( self: List[Any] ) -> Tuple:
        """Classification forward pass: compare logits to reference values."""
        lowercase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            UpperCamelCase_ )
        lowercase__ = self.default_image_processor
        lowercase__ = prepare_img()
        lowercase__ = image_processor(images=UpperCamelCase_ , return_tensors='''pt''' ).to(UpperCamelCase_ )
        # forward pass
        with torch.no_grad():
            lowercase__ = model(**UpperCamelCase_ )
        # verify the logits
        lowercase__ = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase_ )
        lowercase__ = torch.tensor([1.0448, -0.3745, -1.8317] ).to(UpperCamelCase_ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase_ , atol=1E-4 ) )
| 93 |
import logging
from transformers import PretrainedConfig
lowerCAmelCase = logging.getLogger(__name__)
# NOTE(review): this second assignment rebinds the same (mangled) name and
# clobbers the logger above — presumably two distinct names originally
# (e.g. ``logger`` and a pretrained-config URL map); confirm.
lowerCAmelCase = {
    'bertabs-finetuned-cnndm': 'https://huggingface.co/remi/bertabs-finetuned-cnndm-extractive-abstractive-summarization/resolve/main/config.json',
}
class _a ( UpperCamelCase__ ):
    """Configuration for the BertAbs extractive/abstractive summarization model.

    Stores encoder and decoder hyperparameters (layer counts, hidden sizes,
    head counts, feed-forward sizes, dropouts).  NOTE(review): the base-class
    name and the class attribute below look machine-mangled (the attribute is
    presumably the HF ``model_type`` identifier) — confirm against the
    upstream file.
    """

    _lowercase : List[Any] = '''bertabs'''

    def __init__(
        self,
        vocab_size=30_522,
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2_048,
        dec_dropout=0.2,
        **kwargs,
    ) -> None:
        """Store every hyperparameter on the instance.

        NOTE(review): the original signature declared all parameters under the
        same mangled name (a SyntaxError) and the body bound a throwaway local
        instead of instance attributes while reading the names restored here;
        parameter names and order were reconstructed from those body references
        and the default values — confirm against the upstream config class.

        Args:
            vocab_size: size of the token vocabulary.
            max_pos: maximum number of positions.
            enc_layers / enc_hidden_size / enc_heads / enc_ff_size / enc_dropout:
                encoder depth, width, head count, FFN size and dropout.
            dec_layers / dec_hidden_size / dec_heads / dec_ff_size / dec_dropout:
                decoder depth, width, head count, FFN size and dropout.
            **kwargs: forwarded to the parent configuration class.
        """
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.max_pos = max_pos
        self.enc_layers = enc_layers
        self.enc_hidden_size = enc_hidden_size
        self.enc_heads = enc_heads
        self.enc_ff_size = enc_ff_size
        self.enc_dropout = enc_dropout
        self.dec_layers = dec_layers
        self.dec_hidden_size = dec_hidden_size
        self.dec_heads = dec_heads
        self.dec_ff_size = dec_ff_size
        self.dec_dropout = dec_dropout
| 93 | 1 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase_ :
    """Test helper that builds a miniature LayoutLMv3 configuration plus
    synthetic text/bbox/pixel inputs, and provides ``create_and_check_*``
    helpers asserting output shapes for the base model and each task head.

    NOTE(review): this chunk appears mechanically renamed — ``__init__``
    declares every parameter with the identical name
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError in Python), the body then reads
    the *intended* parameter names (``parent``, ``batch_size``, ...), and the
    ``create_and_check_*`` methods reference an undefined name ``A__`` where
    the original arguments (config, input_ids, bbox, pixel_values, ...) were
    clearly meant.  All methods also share the single name ``__magic_name__``,
    so later defs shadow earlier ones.  Code is left byte-identical; only
    documentation is added.  Restoring the original names is required before
    this can run.
    """

    def __init__(self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=36 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=5_12 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=6 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=10_00 , ) -> List[Any]:
        """Store the tester hyper-parameters and derive the sequence lengths.

        NOTE(review): every assignment below binds a throwaway local; the RHS
        names reveal the intended attribute names.
        """
        SCREAMING_SNAKE_CASE__ : Optional[int] = parent
        SCREAMING_SNAKE_CASE__ : str = batch_size
        SCREAMING_SNAKE_CASE__ : int = num_channels
        SCREAMING_SNAKE_CASE__ : Tuple = image_size
        SCREAMING_SNAKE_CASE__ : str = patch_size
        SCREAMING_SNAKE_CASE__ : Optional[Any] = text_seq_length
        SCREAMING_SNAKE_CASE__ : Any = is_training
        SCREAMING_SNAKE_CASE__ : Tuple = use_input_mask
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
        SCREAMING_SNAKE_CASE__ : Any = vocab_size
        SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
        SCREAMING_SNAKE_CASE__ : List[Any] = num_hidden_layers
        SCREAMING_SNAKE_CASE__ : str = num_attention_heads
        SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
        SCREAMING_SNAKE_CASE__ : Tuple = hidden_act
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE__ : Any = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_vocab_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = type_sequence_label_size
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
        SCREAMING_SNAKE_CASE__ : Dict = coordinate_size
        SCREAMING_SNAKE_CASE__ : Optional[int] = shape_size
        SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
        SCREAMING_SNAKE_CASE__ : int = num_choices
        SCREAMING_SNAKE_CASE__ : Tuple = scope
        SCREAMING_SNAKE_CASE__ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        SCREAMING_SNAKE_CASE__ : List[Any] = text_seq_length
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = (image_size // patch_size) ** 2 + 1
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_seq_length + self.image_seq_length

    def __magic_name__ (self ) -> str:
        """Build config + random inputs: token ids, legal bounding boxes,
        pixel values, optional masks/type-ids/labels."""
        SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal: each box must satisfy x0 <= x1 and y0 <= y1,
        # so swap the coordinate pairs that came out of the RNG inverted.
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    SCREAMING_SNAKE_CASE__ : Any = bbox[i, j, 3]
                    SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 1]
                    SCREAMING_SNAKE_CASE__ : Any = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    SCREAMING_SNAKE_CASE__ : int = bbox[i, j, 2]
                    SCREAMING_SNAKE_CASE__ : Union[str, Any] = bbox[i, j, 0]
                    SCREAMING_SNAKE_CASE__ : Union[str, Any] = t
        SCREAMING_SNAKE_CASE__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = None
        if self.use_input_mask:
            SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
        SCREAMING_SNAKE_CASE__ : Optional[Any] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        SCREAMING_SNAKE_CASE__ : int = None
        SCREAMING_SNAKE_CASE__ : Optional[Any] = None
        if self.use_labels:
            SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        SCREAMING_SNAKE_CASE__ : Optional[int] = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[str]:
        """Check base-model output shapes for text+image, text-only and
        image-only calls (``A__`` is unresolved here — see class NOTE)."""
        SCREAMING_SNAKE_CASE__ : Any = LayoutLMvaModel(config=A__ )
        model.to(A__ )
        model.eval()
        # text + image
        SCREAMING_SNAKE_CASE__ : Optional[Any] = model(A__ , pixel_values=A__ )
        SCREAMING_SNAKE_CASE__ : Dict = model(
            A__ , bbox=A__ , pixel_values=A__ , attention_mask=A__ , token_type_ids=A__ )
        SCREAMING_SNAKE_CASE__ : List[Any] = model(A__ , bbox=A__ , pixel_values=A__ , token_type_ids=A__ )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(A__ , bbox=A__ , pixel_values=A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        SCREAMING_SNAKE_CASE__ : Dict = model(A__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        SCREAMING_SNAKE_CASE__ : Optional[int] = model(pixel_values=A__ )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
        """Check the sequence-classification head: logits shape (batch, num_labels)."""
        SCREAMING_SNAKE_CASE__ : str = self.num_labels
        SCREAMING_SNAKE_CASE__ : List[Any] = LayoutLMvaForSequenceClassification(A__ )
        model.to(A__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ : List[Any] = model(
            A__ , bbox=A__ , pixel_values=A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Any:
        """Check the token-classification head: logits shape (batch, text_seq_length, num_labels)."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.num_labels
        SCREAMING_SNAKE_CASE__ : Optional[Any] = LayoutLMvaForTokenClassification(config=A__ )
        model.to(A__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ : List[Any] = model(
            A__ , bbox=A__ , pixel_values=A__ , attention_mask=A__ , token_type_ids=A__ , labels=A__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Tuple:
        """Check the question-answering head: start/end logits shape (batch, seq_length)."""
        SCREAMING_SNAKE_CASE__ : str = LayoutLMvaForQuestionAnswering(config=A__ )
        model.to(A__ )
        model.eval()
        SCREAMING_SNAKE_CASE__ : str = model(
            A__ , bbox=A__ , pixel_values=A__ , attention_mask=A__ , token_type_ids=A__ , start_positions=A__ , end_positions=A__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __magic_name__ (self ) -> str:
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict).

        NOTE(review): the parenthesised target below annotates a tuple target
        (``(...) : int = ...``), which is not valid Python — further renaming
        damage; the original unpacked into the eight named inputs.
        """
        SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
        (
            (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) , (
                SCREAMING_SNAKE_CASE__
            ) ,
        ) : int = config_and_inputs
        SCREAMING_SNAKE_CASE__ : str = {
            """input_ids""": input_ids,
            """bbox""": bbox,
            """pixel_values""": pixel_values,
            """token_type_ids""": token_type_ids,
            """attention_mask""": input_mask,
        }
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """LayoutLMv3 model test suite (common ModelTester/Pipeline mixin checks).

    NOTE(review): this class is damaged by a mechanical renaming pass — the
    base classes and several referenced names collapsed to
    ``SCREAMING_SNAKE_CASE__``/``A__`` (undefined here), the class attributes
    all share the name ``__UpperCamelCase`` (later assignments shadow earlier
    ones), and one method declares five parameters with the identical name (a
    SyntaxError).  Code is left byte-identical; only documentation is added.
    """

    # NOTE(review): originally distinct flags/attrs (e.g. test_pruning,
    # all_model_classes, pipeline_model_mapping); the shared name means only
    # the last assignment survives.
    __UpperCamelCase : Dict = False
    __UpperCamelCase : Any = False
    __UpperCamelCase : Tuple = False
    __UpperCamelCase : Any = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    __UpperCamelCase : int = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
        """Pipeline-skip predicate: always skip (duplicate param names — see class NOTE)."""
        return True

    def __magic_name__ (self ) -> Optional[Any]:
        """setUp: build the model tester and a ConfigTester."""
        SCREAMING_SNAKE_CASE__ : Any = LayoutLMvaModelTester(self )
        SCREAMING_SNAKE_CASE__ : str = ConfigTester(self , config_class=A__ , hidden_size=37 )

    def __magic_name__ (self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Optional[int]:
        """Adapt inputs_dict per model class (expand for multiple choice,
        attach dummy labels when return_labels is requested)."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = copy.deepcopy(A__ )
        if model_class in get_values(A__ ):
            SCREAMING_SNAKE_CASE__ : Dict = {
                k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
                if isinstance(A__ , torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(A__ ):
                SCREAMING_SNAKE_CASE__ : str = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=A__ )
            elif model_class in get_values(A__ ):
                # QA heads take start/end positions.
                SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A__ )
                SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A__ )
            elif model_class in [
                *get_values(A__ ),
            ]:
                SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=A__ )
            elif model_class in [
                *get_values(A__ ),
            ]:
                # Token-classification heads take per-token labels.
                SCREAMING_SNAKE_CASE__ : Any = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=A__ , )
        return inputs_dict

    def __magic_name__ (self ) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def __magic_name__ (self ) -> Dict:
        """Base-model forward-pass shape checks."""
        SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def __magic_name__ (self ) -> Tuple:
        """Base-model checks under each position-embedding variant."""
        SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            SCREAMING_SNAKE_CASE__ : Tuple = type
            self.model_tester.create_and_check_model(*A__ )

    def __magic_name__ (self ) -> int:
        """Sequence-classification head shape checks."""
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A__ )

    def __magic_name__ (self ) -> str:
        """Token-classification head shape checks."""
        SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A__ )

    def __magic_name__ (self ) -> Optional[Any]:
        """Question-answering head shape checks."""
        SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A__ )

    @slow
    def __magic_name__ (self ) -> List[str]:
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            SCREAMING_SNAKE_CASE__ : Optional[int] = LayoutLMvaModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )
def lowercase_ ( ):
    """Load the COCO cats fixture image used by the integration tests below.

    Returns the opened PIL image.

    Fix: the damaged original assigned the opened image to a throwaway local
    and then returned the undefined name ``image`` (NameError at runtime);
    returning the opened image directly restores the evident intent.
    """
    return Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
@require_torch
class lowerCAmelCase_ (unittest.TestCase ):
    """LayoutLMv3 integration test: run the pretrained base checkpoint on the
    fixture image plus toy token ids/bboxes and compare hidden states to
    reference values.

    NOTE(review): both methods share the name ``__magic_name__`` (the second
    shadows the first) and the bodies reference ``A__``, ``pixel_values``,
    ``input_ids``, ``bbox`` and ``outputs``, which are undefined in this scope
    — mechanical renaming damage.  Code is left byte-identical.
    """

    @cached_property
    def __magic_name__ (self ) -> str:
        """Image processor with OCR disabled (None when vision deps are absent)."""
        return LayoutLMvaImageProcessor(apply_ocr=A__ ) if is_vision_available() else None

    @slow
    def __magic_name__ (self ) -> Dict:
        """Forward pass on microsoft/layoutlmv3-base; verify shape and a 3x3
        slice of the last hidden state."""
        SCREAMING_SNAKE_CASE__ : Optional[int] = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(A__ )
        SCREAMING_SNAKE_CASE__ : str = self.default_image_processor
        SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
        SCREAMING_SNAKE_CASE__ : str = image_processor(images=A__ , return_tensors="""pt""" ).pixel_values.to(A__ )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[1, 2]] )
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(
            input_ids=input_ids.to(A__ ) , bbox=bbox.to(A__ ) , pixel_values=pixel_values.to(A__ ) , )
        # verify the logits: 2 text tokens + 196 patches + 1 CLS = 199 positions
        SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((1, 1_99, 7_68) )
        self.assertEqual(outputs.last_hidden_state.shape , A__ )
        SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , A__ , atol=1E-4 ) )
| 25 |
import requests
from bsa import BeautifulSoup
def UpperCamelCase ( lowerCAmelCase__ = "https://www.worldometers.info/coronavirus" ):
    """Scrape worldometers.info and return a dict mapping each COVID-19
    headline statistic (e.g. "Coronavirus Cases:") to its displayed value.

    Fixes relative to the damaged original: every intermediate was bound to
    the single throwaway name ``lowercase`` while later lines read the
    undefined names ``soup``/``keys``/``values`` (NameError), and the final
    ``zip`` was called on the URL instead of the scraped lists.

    NOTE(review): the file imports ``from bsa import BeautifulSoup`` —
    presumably a mangled ``bs4``; confirm before running.
    """
    soup = BeautifulSoup(requests.get(lowerCAmelCase__ ).text , '''html.parser''' )
    # The three big counters live in <h1>/<div class="maincounter-number">;
    # the remaining named statistics live in the panel rows.
    keys = soup.findAll('''h1''' )
    values = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
    keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
    values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
    # zip silently truncates to the shorter list if the page layout changes.
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}
if __name__ == "__main__":
    # Fix: the original called ``world_covidaa_stats()``, a name that does not
    # exist in this module (the scraper above is named ``UpperCamelCase``),
    # raising NameError before any output was printed.
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in UpperCamelCase().items():
        print(F'{key}\n{value}\n')
| 101 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Fix: the damaged original assigned all four module constants to the single
# name ``__lowercase``, so the names read throughout the module (HEURISTIC,
# grid, delta, TPosition) were undefined.  The referenced names are restored.
HEURISTIC = 0

# 0 are free path whereas 1's are obstacles; coordinates are (y, x).
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# A grid position as (y, x).
TPosition = tuple[int, int]
class Node:
    """A* search node.

    Stores its own (x, y) position, the goal position, the path cost so far
    (``g_cost``), the heuristic estimate (``h_cost``) and their sum
    (``f_cost``), which drives ordering via ``__lt__``.

    Fixes relative to the damaged original: the class was named
    ``lowerCamelCase_`` although every call site uses ``Node``; ``__init__``
    declared all six parameters with the identical name ``__lowercase`` (a
    SyntaxError) and stored nothing on ``self``; the heuristic called the
    nonexistent ``self.calculate_heuristic`` and returned
    ``abs(__lowercase) + abs(__lowercase)`` on an unrelated module global.
    """

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # (y, x), matching the grid layout
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Manhattan distance when HEURISTIC == 1, else Euclidean distance."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by total estimated cost lets open lists be sorted directly.
        return self.f_cost < other.f_cost
class AStar:
    """Single-direction A* search over the module-level ``grid``.

    Fixes relative to the damaged original: the class was named
    ``lowerCamelCase_`` and every method ``UpperCamelCase__``, while its own
    bodies (and the ``__main__`` driver) call ``AStar`` / ``search`` /
    ``get_successors`` / ``retrace_path``; constructor arguments and locals
    were collapsed onto ``__lowercase`` and never stored.  The referenced
    names are restored; the algorithm itself is unchanged.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Positions arrive as (y, x); Node takes (pos_x, pos_y, goal_x, goal_y, g_cost, parent).
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        # Sentinel g_cost on the target so any real path compares cheaper.
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        """Run A*; return the (y, x) path to the target, or ``[start.pos]``
        when the open list is exhausted without reaching it."""
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (lowest f_cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # Keep whichever copy of this position has the lower g_cost.
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return the in-bounds, non-obstacle neighbours of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Follow parent links back to the start and return the (y, x) path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: a forward search from start and a backward search
    from goal, advanced in lockstep until their current nodes coincide.

    Fixes relative to the damaged original: the class/method names were
    collapsed (call sites use ``BidirectionalAStar``/``search``/
    ``retrace_bidirectional_path``), the two inner searches were built with
    undefined ``__lowercase`` arguments, and the per-iteration retargeting
    assignments were bound to dead locals instead of
    ``self.fwd_astar.target`` / ``self.bwd_astar.target``.
    """

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        """Alternate both searches; return the joined path on meeting, or
        ``[start.pos]`` when either frontier empties without meeting."""
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Retarget each search at the other's current node so the
            # heuristics keep pulling the frontiers toward each other.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # Keep whichever copy has the lower g_cost.
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # the meeting node already ends fwd_path
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y, x]
    # Fixes: the original bound every value to the single name ``__lowercase``
    # while reading ``init``, ``goal``, ``a_star`` and ``bd_start_time``
    # (NameError), and timed the bidirectional search without ever invoking it.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # NOTE(review): restored so the timing below is meaningful
    bd_end_time = time.time() - bd_start_time
    print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 105 | from __future__ import annotations
def lowerCamelCase(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    """Solve the shear-stress relation (stress = tangential_force / area) for
    whichever of the three quantities is given as 0.

    Exactly one argument must be 0; the other two must be non-negative.
    Returns a tuple ``(name_of_solved_quantity, value)``.

    Raises:
        ValueError: if the number of zero arguments is not exactly one, or if
            any argument is negative.

    Fix relative to the damaged original: all three parameters were declared
    with the identical name ``SCREAMING_SNAKE_CASE`` (a SyntaxError) while the
    body read the intended names ``stress``/``tangential_force``/``area``;
    the real parameter names are restored.
    """
    # Exactly one unknown (encoded as 0) is required to solve the equation.
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''' )
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''' )
    elif area < 0:
        raise ValueError('''Area cannot be negative''' )
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 105 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A : List[Any] = logging.get_logger(__name__)
__A : Tuple = {
"Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Configuration for the InstructBLIP vision encoder (ViT-style tower).

    Fixes relative to the damaged original: ``__init__`` declared every
    parameter with the identical name ``lowerCamelCase`` (a SyntaxError) and
    bound every value to a dead local; the parameter names are recovered from
    the assignment order/defaults and stored on ``self`` as a
    ``PretrainedConfig`` subclass requires.  The class attribute is restored
    as ``model_type`` since ``from_pretrained`` reads ``cls.model_type``.
    """

    model_type = 'instructblip_vision_model'

    def __init__(
        self,
        hidden_size=14_08,
        intermediate_size=61_44,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=2_24,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1E-6,
        attention_dropout=0.0,
        initializer_range=1E-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        """Load this vision sub-config, unwrapping it from a composite
        InstructBLIP config dict when necessary."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""" ) == "instructblip":
            config_dict = config_dict["""vision_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Configuration for the InstructBLIP Q-Former (BERT-style bridge between
    the vision encoder and the language model).

    Fixes relative to the damaged original: ``__init__`` declared every
    parameter with the identical name ``lowerCamelCase`` (a SyntaxError) and
    bound every value to a dead local; the parameter names are recovered from
    the assignment order/defaults and stored on ``self``.  The class
    attribute is restored as ``model_type`` since ``from_pretrained`` reads
    ``cls.model_type``.
    """

    model_type = 'instructblip_qformer'

    def __init__(
        self,
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=14_08,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        # How often a cross-attention layer is inserted into the Q-Former stack.
        self.cross_attention_frequency = cross_attention_frequency
        # Hidden size of the vision encoder the Q-Former cross-attends to.
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs ) -> "PretrainedConfig":
        """Load this Q-Former sub-config, unwrapping it from a composite
        InstructBLIP config dict when necessary."""
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("""model_type""" ) == "instructblip":
            config_dict = config_dict["""qformer_config"""]
        if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class __snake_case ( _SCREAMING_SNAKE_CASE):
    """Composite InstructBLIP configuration: vision encoder + Q-Former +
    language-model sub-configs, plus the number of query tokens.

    Fixes relative to the damaged original: the two conflicting class
    attributes both named ``lowercase`` are restored as ``model_type`` /
    ``is_composition``; constructor values were bound to dead locals instead
    of ``self`` (so ``to_dict`` could never see them); ``to_dict`` assigned
    each sub-dict to a throwaway local and returned the undefined name
    ``output`` (NameError).

    NOTE(review): ``InstructBlipVisionConfig`` / ``InstructBlipQFormerConfig``
    are referenced exactly as in the original; in this damaged file those
    classes are defined under other names — confirm against the upstream
    module before running.
    """

    model_type = 'instructblip'
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs ) -> None:
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
        if qformer_config is None:
            qformer_config = {}
            logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
        if text_config is None:
            text_config = {}
            logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        # The language model is selected dynamically from its own model_type.
        text_model_type = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        # The Q-Former cross-attends to the vision tower, so its encoder width
        # must match the vision hidden size.
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(cls, vision_config, qformer_config, text_config, **kwargs ) -> "PretrainedConfig":
        """Build a composite config from already-instantiated sub-configs."""
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )

    def to_dict(self ) -> dict:
        """Serialize to a plain dict, expanding each sub-config in place."""
        output = copy.deepcopy(self.__dict__ )
        output["""vision_config"""] = self.vision_config.to_dict()
        output["""qformer_config"""] = self.qformer_config.to_dict()
        output["""text_config"""] = self.text_config.to_dict()
        output["""model_type"""] = self.__class__.model_type
        return output
| 120 |
'''simple docstring'''
def UpperCamelCase_ ( A__ : int ):
    """Return True when ``A__`` is a pentagonal number.

    A positive integer P is pentagonal iff (1 + sqrt(1 + 24*P)) / 6 is a
    whole number — the inverse of P(n) = n * (3n - 1) / 2.
    """
    discriminant_root = (1 + 24 * A__) ** 0.5
    candidate_index = (1 + discriminant_root) / 6
    return candidate_index % 1 == 0
def UpperCamelCase_ ( A__ : int = 50_00 ):
    """Project Euler 44: among the first ``A__ - 1`` pentagonal numbers, find
    a pair whose sum and difference are both pentagonal and return the
    (minimised-by-scan-order) difference, or -1 if no such pair exists.

    Fixes relative to the damaged original: the loop body called
    ``is_pentagonal`` — a name that does not exist in this module (both
    functions here were renamed to ``UpperCamelCase_``, so the helper is
    shadowed) — and passed it the limit argument ``A__`` instead of the
    computed sum/difference.  The pentagonality test is inlined as a private
    helper and applied to the sum ``a`` and difference ``b`` as intended.
    """

    def _is_pentagonal(n: int) -> bool:
        # P is pentagonal iff (1 + sqrt(1 + 24P)) / 6 is a whole number.
        root = (1 + 24 * n) ** 0.5
        return ((1 + root) / 6) % 1 == 0

    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1 , A__ )]
    for i, pentagonal_i in enumerate(pentagonal_nums ):
        for j in range(i , len(pentagonal_nums ) ):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if _is_pentagonal(a ) and _is_pentagonal(b ):
                return b
    return -1


if __name__ == "__main__":
    # Fix: the original printed ``{solution() = }`` — an undefined name.
    print(F'''{UpperCamelCase_() = }''')
| 120 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class __magic_name__ ( unittest.TestCase):
    """Tests for ClapProcessor: save/load round-trips and delegation to the
    wrapped RobertaTokenizer and ClapFeatureExtractor.

    NOTE(review): this class is damaged by a mechanical renaming pass — every
    method shares the name ``SCREAMING_SNAKE_CASE_`` (later defs shadow
    earlier ones), setUp binds the checkpoint name and tmp dir to dead locals
    while later methods read ``self.checkpoint`` / ``self.tmpdirname``, and
    the bodies reference the undefined name ``__A`` where the original
    arguments (kwargs, tokenizer, feature_extractor, ...) were intended.
    Code is left byte-identical; only documentation is added.
    """

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """setUp: record checkpoint id and create a temp dir (see class NOTE —
        values are bound to dead locals here)."""
        lowercase_ : Any = """laion/clap-htsat-unfused"""
        lowercase_ : Optional[Any] = tempfile.mkdtemp()

    def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : int ):
        """Build a RobertaTokenizer from the checkpoint."""
        return RobertaTokenizer.from_pretrained(self.checkpoint , **__A )

    def SCREAMING_SNAKE_CASE_ ( self : List[str] , **lowercase_ : List[Any] ):
        """Build a ClapFeatureExtractor from the checkpoint."""
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **__A )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """tearDown: remove the temp dir."""
        shutil.rmtree(self.tmpdirname )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        """save_pretrained/from_pretrained round-trip preserves tokenizer vocab
        and feature-extractor settings."""
        lowercase_ : Optional[Any] = self.get_tokenizer()
        lowercase_ : Optional[Any] = self.get_feature_extractor()
        lowercase_ : Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : Optional[Any] = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __A )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , __A )

    def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
        """from_pretrained honours extra kwargs (special tokens, normalization)."""
        lowercase_ : Dict = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase_ : str = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
        lowercase_ : Dict = self.get_feature_extractor(do_normalize=__A , padding_value=1.0 )
        lowercase_ : Union[str, Any] = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__A , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , __A )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , __A )

    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        """processor(audios=...) matches the raw feature extractor output."""
        lowercase_ : Dict = self.get_feature_extractor()
        lowercase_ : str = self.get_tokenizer()
        lowercase_ : List[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
        lowercase_ : Optional[Any] = floats_list((3, 1000) )
        lowercase_ : Optional[Any] = feature_extractor(__A , return_tensors="""np""" )
        lowercase_ : str = processor(audios=__A , return_tensors="""np""" )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """processor(text=...) matches the raw tokenizer output."""
        lowercase_ : List[Any] = self.get_feature_extractor()
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
        lowercase_ : List[Any] = """This is a test string"""
        lowercase_ : Dict = processor(text=__A )
        lowercase_ : List[str] = tokenizer(__A )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def SCREAMING_SNAKE_CASE_ ( self : str ):
        """processor.batch_decode delegates to tokenizer.batch_decode."""
        lowercase_ : int = self.get_feature_extractor()
        lowercase_ : Tuple = self.get_tokenizer()
        lowercase_ : Optional[Any] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
        lowercase_ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowercase_ : Tuple = processor.batch_decode(__A )
        lowercase_ : Optional[Any] = tokenizer.batch_decode(__A )
        self.assertListEqual(__A , __A )

    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        """Processor model_input_names align with the feature extractor's."""
        lowercase_ : Optional[Any] = self.get_feature_extractor()
        lowercase_ : Any = self.get_tokenizer()
        lowercase_ : Optional[int] = ClapProcessor(tokenizer=__A , feature_extractor=__A )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 351 | '''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
# NOTE(review): the three module-level constants below are all bound to the
# SAME name `_lowercase` (each assignment shadows the previous one), yet later
# functions in this file read `_IMAGE_COMPRESSION_FORMATS`, `_NATIVE_BYTEORDER`
# and `_VALID_IMAGE_ARRAY_DTPYES` — names that are never defined here. This
# looks like a mangled rename; confirm against the upstream
# `datasets.features.image` module.
# Lazily-populated cache of image formats Pillow can both open and save.
_lowercase : Optional[List[str]] = None
# numpy byte-order character for the host: "<" little-endian, ">" big-endian.
_lowercase : str = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
# dtypes Pillow can round-trip (save then load without value corruption).
_lowercase : Optional[int] = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class __magic_name__ :
    """`datasets`-style Image feature type.

    Encodes image-like examples (paths, raw bytes, numpy arrays, PIL images)
    into Arrow `struct {"bytes": binary, "path": string}` storage and decodes
    them back into `PIL.Image.Image` objects.

    NOTE(review): this block appears machine-mangled — the five class
    attributes below all share the name `UpperCamelCase__` (each shadows the
    previous one; originally decode/id/dtype/pa_type/_type), and most method
    locals are bound to `lowercase_` while later statements read the
    originally-intended names (`value`, `path`, `bytes_`, `image`,
    `bytes_array`, `path_array`, `storage`), which are undefined here. The
    `field(...)` call also references an undefined `_UpperCAmelCase`
    (presumably False). Confirm against the upstream
    `datasets.features.image.Image` class before relying on this code.
    """
    UpperCamelCase__ = True
    UpperCamelCase__ = None
    # Automatically constructed
    UpperCamelCase__ = "PIL.Image.Image"
    UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
    UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
    def __call__( self : Tuple ):
        # The Arrow storage type this feature maps to.
        return self.pa_type
    # encode_example: normalize any supported input into {"path", "bytes"}.
    def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support encoding images, please install 'Pillow'.""" )
        # NOTE(review): the isinstance targets below were mangled; the branches
        # were presumably (list -> np.array), (str -> path), (bytes -> bytes).
        if isinstance(lowercase_ , lowercase_ ):
            lowercase_ : int = np.array(lowercase_ )
        if isinstance(lowercase_ , lowercase_ ):
            return {"path": value, "bytes": None}
        elif isinstance(lowercase_ , lowercase_ ):
            return {"path": None, "bytes": value}
        elif isinstance(lowercase_ , np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(lowercase_ )
        elif isinstance(lowercase_ , PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(lowercase_ )
        elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("""path""" )}
        elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
        else:
            raise ValueError(
                f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
    # decode_example: turn a {"path", "bytes"} dict back into a PIL image.
    # token_per_repo_id maps hub repo ids to auth tokens for private datasets.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : dict , lowercase_ : List[str]=None ):
        if not self.decode:
            raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("""To support decoding images, please install 'Pillow'.""" )
        if token_per_repo_id is None:
            lowercase_ : Union[str, Any] = {}
        lowercase_ , lowercase_ : List[Any] = value["""path"""], value["""bytes"""]
        if bytes_ is None:
            if path is None:
                raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
            else:
                if is_local_path(lowercase_ ):
                    lowercase_ : int = PIL.Image.open(lowercase_ )
                else:
                    # Remote/streamed file: extract repo id from a chained URL
                    # ("a::b" syntax) and look up its auth token.
                    lowercase_ : str = path.split("""::""" )[-1]
                    try:
                        lowercase_ : Any = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
                        lowercase_ : Optional[Any] = token_per_repo_id.get(lowercase_ )
                    except ValueError:
                        lowercase_ : str = None
                    with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
                        lowercase_ : Dict = BytesIO(f.read() )
                    lowercase_ : Optional[Any] = PIL.Image.open(bytes_ )
        else:
            lowercase_ : Any = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    # flatten: the non-decoded (storage-level) view of this feature.
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value("""binary""" ),
                "path": Value("""string""" ),
            }
        )
    # cast_storage: cast string / binary / struct / list arrays into the
    # canonical struct<bytes, path> storage.
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
            lowercase_ : Any = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Any = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index("""bytes""" ) >= 0:
                lowercase_ : Optional[int] = storage.field("""bytes""" )
            else:
                lowercase_ : Optional[Any] = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
            if storage.type.get_field_index("""path""" ) >= 0:
                lowercase_ : Dict = storage.field("""path""" )
            else:
                lowercase_ : int = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            # Nested-list input: re-encode each sub-list as image bytes.
            lowercase_ : Optional[int] = pa.array(
                [encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
            lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
            lowercase_ : Tuple = pa.StructArray.from_arrays(
                [bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(lowercase_ , self.pa_type )
    # embed_storage: inline the bytes of any path-only entries so the table is
    # self-contained, keeping only the basename of the original path.
    def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(lowercase_ : Optional[Any] ):
            # Read the file behind `path` (supports streaming via xopen).
            with xopen(lowercase_ , """rb""" ) as f:
                lowercase_ : int = f.read()
            return bytes_
        lowercase_ : Optional[Any] = pa.array(
            [
                (path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        lowercase_ : Any = pa.array(
            [os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
        lowercase_ : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
        return array_cast(lowercase_ , self.pa_type )
def lowerCamelCase ( ) -> List[str]:
    """Return (and lazily cache) the image formats Pillow can both open and save.

    Returns:
        List of Pillow format identifiers present in both the OPEN and SAVE
        registries.

    Raises:
        ImportError: if Pillow is not installed.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    global _IMAGE_COMPRESSION_FORMATS
    # NOTE(review): the module-level cache was mangled to `_lowercase` above,
    # so guard against the global not existing yet.
    try:
        cached = _IMAGE_COMPRESSION_FORMATS
    except NameError:
        cached = None
    if cached is None:
        PIL.Image.init()  # populates PIL.Image.OPEN / PIL.Image.SAVE registries
        # Fix: actually store the computed list in the module-level cache — the
        # original bound it to a throwaway local and always returned None.
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
    """Serialize a PIL image to compressed bytes.

    Keeps the image's own format when Pillow can round-trip it, otherwise
    falls back to PNG for common modes and TIFF for everything else.
    """
    image = UpperCAmelCase__  # fix: the original body read an undefined name `image`
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        image_format = image.format
    else:
        image_format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    # Fix: the original called image.save(<image>, format=<image>) — the
    # target buffer and the chosen format string were both mangled away.
    image.save(buffer , format=image_format )
    return buffer.getvalue()
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
    """Encode a PIL image as an Image-feature dict {"path", "bytes"}.

    If the image knows the file it was loaded from, reference the path and
    skip duplicating the data; otherwise serialize it to bytes.
    """
    image = UpperCAmelCase__  # fix: the original body read an undefined name `image`
    if hasattr(image , """filename""" ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        # NOTE(review): `image_to_bytes` is the serializer defined just above;
        # in this mangled file it was renamed `lowerCamelCase` as well.
        return {"path": None, "bytes": image_to_bytes(image )}
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
    """Encode a numpy array as {"path": None, "bytes": <compressed image bytes>}.

    The array dtype is downcast, when necessary, to the closest dtype Pillow
    can round-trip (_VALID_IMAGE_ARRAY_DTPYES); multi-channel arrays must be
    unsigned/signed integer and are forced to uint8.

    Raises:
        ImportError: if Pillow is not installed.
        TypeError: if no Pillow-compatible dtype can be found.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    array = UpperCAmelCase__  # fix: the original body read an undefined name `array`
    dtype = array.dtype
    # NOTE(review): _NATIVE_BYTEORDER / _VALID_IMAGE_ARRAY_DTPYES were mangled
    # to `_lowercase` at module level — restore their names there as well.
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("""|u1""" )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
        if dtype is not dest_dtype:
            warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype = np.dtype(dtype_byteorder + dtype_kind + str(dtype_itemsize ) )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
    """Encode a column of image-like objects into Image-feature dicts.

    Dispatches on the first non-null element: strings become path-only dicts,
    numpy arrays / PIL images are serialized to bytes (preserving nulls);
    anything else is returned unchanged.
    """
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("""To support encoding images, please install 'Pillow'.""" )
    objs = UpperCAmelCase__
    if objs:
        # fix: the original unpacked first_non_null_value into throwaway
        # locals and then ran isinstance checks on the list itself.
        _, first_obj = first_non_null_value(objs )
        if isinstance(first_obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(first_obj , np.ndarray ):
            # NOTE(review): encode_np_array / encode_pil_image are the sibling
            # encoders above (all mangled to `lowerCamelCase` in this file).
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(first_obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
| 21 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def lowercase (a : list , b : list ) -> list:
    """Multiply two 2x2 matrices directly (Strassen base case).

    Raises:
        Exception: if either operand is not 2x2.
    """
    # Fix: the original declared BOTH parameters with the same mangled name
    # (a SyntaxError) while the body read `a` and `b` — names restored.
    if len(a ) != 2 or len(a[0] ) != 2 or len(b ) != 2 or len(b[0] ) != 2:
        raise Exception("""Matrices are not 2x2""" )
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def lowercase (matrix_a : list , matrix_b : list ) -> Optional[Any]:
    """Element-wise sum of two equally-shaped matrices.

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError) while the body read matrix_a / matrix_b — restored.
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def lowercase (matrix_a : list , matrix_b : list ) -> Dict:
    """Element-wise difference (matrix_a - matrix_b) of two equally-shaped matrices.

    Fix: the original declared both parameters with the same mangled name
    (a SyntaxError) while the body read matrix_a / matrix_b — restored.
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
        for row in range(len(matrix_a ) )
    ]
def lowercase (a : list ) -> tuple[list, list, list, list]:
    """Split a square even-dimension matrix into its four quadrants.

    Returns:
        (top_left, top_right, bot_left, bot_right).

    Raises:
        Exception: if either dimension is odd.
    """
    # Fix: the original mixed the mangled parameter name with `a` and every
    # range() call had identical start/stop (always-empty quadrants) — the
    # midpoint/length bounds are restored from the intended quadrant split.
    if len(a ) % 2 != 0 or len(a[0] ) % 2 != 0:
        raise Exception("""Odd matrices are not supported!""" )
    matrix_length = len(a )
    mid = matrix_length // 2
    top_right = [[a[i][j] for j in range(mid , matrix_length )] for i in range(mid )]
    bot_right = [
        [a[i][j] for j in range(mid , matrix_length )] for i in range(mid , matrix_length )
    ]
    top_left = [[a[i][j] for j in range(mid )] for i in range(mid )]
    bot_left = [[a[i][j] for j in range(mid )] for i in range(mid , matrix_length )]
    return top_left, top_right, bot_left, bot_right
def lowercase (matrix : list ) -> tuple[int, int]:
    """Return the (rows, cols) dimensions of a matrix.

    Fix: the original body read `matrix` while the parameter carried a
    mangled name — the parameter is renamed to match.
    """
    return len(matrix ), len(matrix[0] )
def lowercase (matrix : list ) -> None:
    """Print the matrix, one row per line.

    Fix: the original stringified the (mangled) parameter on every iteration
    instead of the loop variable `line`, printing the whole matrix N times.
    """
    print("""\n""".join(str(line ) for line in matrix ) )
# NOTE(review, whole function): this is the recursive core of Strassen's
# matrix multiplication, but the block is machine-mangled and cannot run:
# (1) both parameters share one name — a SyntaxError; (2) the helper names it
# calls (matrix_dimensions, default_matrix_multiplication, split_matrix,
# matrix_addition, matrix_subtraction, actual_strassen) are all defined above
# under the single name `lowercase` and so do not exist; (3) every
# intermediate (the 8 quadrants, the 7 Strassen products t1..t7, the 4 result
# quadrants) is bound to `lowerCAmelCase` while the final loops read
# top_left/top_right/bot_left/bot_right. Restore the original helper and
# variable names before use.
def lowercase (snake_case__ : list , snake_case__ : list ) -> list:
    """Recursively multiply two equally-sized power-of-two square matrices
    using Strassen's 7-multiplication scheme; 2x2 inputs fall through to the
    direct base-case multiply."""
    if matrix_dimensions(snake_case__ ) == (2, 2):
        return default_matrix_multiplication(snake_case__ , snake_case__ )
    lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = split_matrix(snake_case__ )
    lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = split_matrix(snake_case__ )
    # The seven Strassen products.
    lowerCAmelCase = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) )
    lowerCAmelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ )
    lowerCAmelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ )
    lowerCAmelCase = actual_strassen(snake_case__ , matrix_subtraction(snake_case__ , snake_case__ ) )
    lowerCAmelCase = actual_strassen(matrix_addition(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) )
    lowerCAmelCase = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) )
    lowerCAmelCase = actual_strassen(matrix_subtraction(snake_case__ , snake_case__ ) , matrix_addition(snake_case__ , snake_case__ ) )
    # Combine the products into the four result quadrants.
    lowerCAmelCase = matrix_addition(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ )
    lowerCAmelCase = matrix_addition(snake_case__ , snake_case__ )
    lowerCAmelCase = matrix_addition(snake_case__ , snake_case__ )
    lowerCAmelCase = matrix_subtraction(matrix_subtraction(matrix_addition(snake_case__ , snake_case__ ) , snake_case__ ) , snake_case__ )
    # construct the new matrix from our 4 quadrants
    lowerCAmelCase = []
    for i in range(len(snake_case__ ) ):
        new_matrix.append(top_left[i] + top_right[i] )
    for i in range(len(snake_case__ ) ):
        new_matrix.append(bot_left[i] + bot_right[i] )
    return new_matrix
# NOTE(review, whole function): the public Strassen entry point — pads both
# operands with zeros up to the next power-of-two square, runs the recursive
# multiply, and strips the padding. It is machine-mangled and cannot run:
# (1) both parameters share one name (SyntaxError); (2) `matrixa` and
# `dimensiona` each stand in for BOTH operands/dimension pairs (the two
# originals were fused into one name); (3) `math.loga` is not a real function
# (presumably math.log2); (4) matrix_dimensions / actual_strassen are not
# defined under those names in this file. Restore distinct names before use.
def lowercase (snake_case__ : list , snake_case__ : list ) -> list:
    """Multiply two matrices of compatible (not necessarily square or
    power-of-two) dimensions via zero-padded Strassen recursion."""
    if matrix_dimensions(snake_case__ )[1] != matrix_dimensions(snake_case__ )[0]:
        lowerCAmelCase = (
            """Unable to multiply these matrices, please check the dimensions.\n"""
            f'''Matrix A: {matrixa}\n'''
            f'''Matrix B: {matrixa}'''
        )
        raise Exception(snake_case__ )
    lowerCAmelCase = matrix_dimensions(snake_case__ )
    lowerCAmelCase = matrix_dimensions(snake_case__ )
    if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
        return [matrixa, matrixa]
    # Pad to the next power-of-two square dimension.
    lowerCAmelCase = max(*snake_case__ , *snake_case__ )
    lowerCAmelCase = int(math.pow(2 , math.ceil(math.loga(snake_case__ ) ) ) )
    lowerCAmelCase = matrixa
    lowerCAmelCase = matrixa
    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0 , snake_case__ ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , snake_case__ ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , snake_case__ ):
                new_matrixa[i].append(0 )
        else:
            new_matrixa.append([0] * maxim )
    lowerCAmelCase = actual_strassen(snake_case__ , snake_case__ )
    # Removing the additional zeros
    for i in range(0 , snake_case__ ):
        if i < dimensiona[0]:
            for _ in range(dimensiona[1] , snake_case__ ):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: multiply a 10x4 matrix by a 4x4 matrix.
    # NOTE(review): both literals are bound to the same name `a` (the second
    # shadows the first), and the call below reads `matrixa` / `strassen`,
    # neither of which is defined in this mangled file — the two operands and
    # the entry-point name were collapsed during obfuscation.
    a = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    a = [[0, 2, 1, 1], [1_6, 2, 3, 3], [2, 2, 7, 7], [1_3, 1_1, 2_2, 4]]
    print(strassen(matrixa, matrixa))
| 155 |
"""simple docstring"""
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
# CLIP tokenizer test-suite (slow CLIPTokenizer vs fast CLIPTokenizerFast).
# NOTE(review): this block appears machine-mangled. The first base class `_a`
# is undefined (TokenizerTesterMixin from the import block above was
# presumably intended), the five class attributes are all bound to `_a`
# (later ones shadow earlier ones), all test methods share the name
# `__lowercase` (only the last survives), and locals are bound to
# `lowerCAmelCase` while later statements read the originally-intended names
# (`self.vocab_file`, `kwargs`, `input_text`, `tokenizer`, `spaces_unicodes`,
# `line_break_unicodes`, `text_of_1_token`, `text`), which are undefined
# here. Confirm against the upstream CLIPTokenizationTest before relying on
# any of this.
class SCREAMING_SNAKE_CASE__ ( _a , unittest.TestCase ):
    _a = CLIPTokenizer
    _a = CLIPTokenizerFast
    _a = True
    _a = {}
    _a = False
    # Intended setUp: write a tiny BPE vocab + merges file to the temp dir.
    def __lowercase ( self : Tuple ):
        super().setUp()
        # fmt: off
        lowerCAmelCase = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
        # fmt: on
        lowerCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
        lowerCAmelCase = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
        lowerCAmelCase = {"""unk_token""": """<unk>"""}
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(lowerCAmelCase ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(lowerCAmelCase ) )
    # Helper: build the slow tokenizer from the temp files.
    def __lowercase ( self : Optional[Any] , **lowerCAmelCase : str ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase )
    # Helper: build the fast tokenizer from the temp files.
    def __lowercase ( self : Any , **lowerCAmelCase : Union[str, Any] ):
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase )
    # Helper: (input_text, output_text) pair for round-trip tests.
    def __lowercase ( self : Optional[Any] , lowerCAmelCase : Dict ):
        lowerCAmelCase = """lower newer"""
        lowerCAmelCase = """lower newer"""
        return input_text, output_text
    # Tokenize "lower newer" against the toy vocab and check tokens and ids.
    def __lowercase ( self : int ):
        lowerCAmelCase = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        lowerCAmelCase = """lower newer"""
        lowerCAmelCase = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
        lowerCAmelCase = tokenizer.tokenize(lowerCAmelCase )
        self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
        lowerCAmelCase = tokens + [tokenizer.unk_token]
        lowerCAmelCase = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , lowerCAmelCase )
    # Check slow (ftfy) and fast tokenizers agree on tricky unicode inputs.
    @require_ftfy
    def __lowercase ( self : Union[str, Any] ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
                lowerCAmelCase = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."""
                lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
                lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                lowerCAmelCase = """xa\u0303y""" + """ """ + """x\xe3y"""
                lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
                lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
                self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                # Test that the tokenization is identical on unicode of space type
                lowerCAmelCase = [
                    """\u0009""",  # (horizontal tab, '\t')
                    """\u000B""",  # (vertical tab)
                    """\u000C""",  # (form feed)
                    """\u0020""",  # (space, ' ')
                    """\u200E""",  # (left-to-right mark):w
                    """\u200F""",  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
                    lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
                    self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
                # Test that the tokenization is identical on unicode of line break type
                lowerCAmelCase = [
                    """\u000A""",  # (line feed, '\n')
                    """\r\n""",  # (carriage return and line feed, '\r\n')
                    """\u000D""",  # (carriage return, '\r')
                    """\r""",  # (carriage return, '\r')
                    """\u000D""",  # (carriage return, '\r')
                    """\u2028""",  # (line separator)
                    """\u2029""",  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    lowerCAmelCase = tokenizer_s.tokenize(lowerCAmelCase )
                    lowerCAmelCase = tokenizer_r.tokenize(lowerCAmelCase )
                    self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
    def __lowercase ( self : Any ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowerCAmelCase = """hello"""  # `hello` is a token in the vocabulary of `pretrained_name`
                lowerCAmelCase = f'''{text_of_1_token} {text_of_1_token}'''
                lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , )
                lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(lowerCAmelCase ) + 1, len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
                lowerCAmelCase = f''' {text}'''
                lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(
                    lowerCAmelCase , use_fast=lowerCAmelCase , )
                lowerCAmelCase = tokenizer_r(lowerCAmelCase , return_offsets_mapping=lowerCAmelCase , add_special_tokens=lowerCAmelCase )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(lowerCAmelCase ) + 1, 1 + len(lowerCAmelCase ) + 1 + len(lowerCAmelCase )) , )
    def __lowercase ( self : Dict ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(lowerCAmelCase ) as context:
            self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
        self.assertTrue(
            context.exception.args[0].startswith(
                """The `backend_tokenizer` provided does not match the expected format.""" ) )
    # Re-run the shared python/rust equality test under ftfy.
    @require_ftfy
    def __lowercase ( self : Optional[int] ):
        super().test_tokenization_python_rust_equals()
    def __lowercase ( self : Optional[int] ):
        # CLIP always lower cases letters
        pass
| 155 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants are bound to the SAME name (the config
# archive map shadows the logger); the class below references neither, so the
# original names (logger / AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_
# ARCHIVE_MAP or similar) were lost in the mangling.
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
# Map of known AST checkpoints to their hosted config files.
SCREAMING_SNAKE_CASE_ = {
    'MIT/ast-finetuned-audioset-10-10-0.4593': (
        'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
    ),
}
class a ( PretrainedConfig ):
    """Configuration for the Audio Spectrogram Transformer (AST) model.

    NOTE(review): reconstructed from a mangled original in which every
    ``__init__`` parameter was named ``A_`` (a SyntaxError), the base class
    was the undefined name ``_UpperCamelCase`` (PretrainedConfig is imported
    above), and ``super().__init__`` forwarded an undefined name. Parameter
    names and defaults are restored from the attribute assignments in the
    original body; confirm against the upstream ASTConfig.
    """
    _lowercase = 'audio-spectrogram-transformer'
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        """Store transformer, patching, and spectrogram hyper-parameters;
        extra kwargs are forwarded to PretrainedConfig."""
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 368 |
from typing import Any
class a :
    """Singly-linked-list node holding a payload and a next pointer."""
    def __init__( self , A_ ):
        """Store the payload; a fresh node starts detached (next is None).

        Fix: the original bound the payload to throwaway locals (and read an
        undefined name `data`) instead of setting the instance attributes
        that the list class below relies on (node.data / node.next).
        """
        self.data = A_
        self.next = None
class a :
    """Minimal singly linked list: push to front, print contents, swap two
    values.

    NOTE(review): reconstructed from a mangled original in which all three
    methods shared the name ``_UpperCAmelCase`` (shadowing each other),
    ``swap_nodes`` declared two parameters with one name (a SyntaxError), and
    ``push`` never linked the new node. The restored method names
    (print_list / push / swap_nodes) are grounded in the call sites of the
    ``__main__`` block below.
    """
    def __init__( self ):
        # Empty list: no head node yet.
        self.head = None
    def print_list( self ):
        """Print each node's data separated by spaces, then a newline."""
        temp = self.head
        while temp is not None:
            print(temp.data , end=" " )
            temp = temp.next
        print()
    def push( self , A_ ):
        """Insert a new node carrying A_ at the front of the list."""
        # NOTE(review): `Node` is the node class defined just above; in this
        # mangled file it was also renamed to `a` (and is shadowed by this
        # class) — restore its original name for this call to resolve.
        new_node = Node(A_ )
        new_node.next = self.head
        self.head = new_node
    def swap_nodes( self , node_data_a , node_data_b ):
        """Swap the payloads of the first nodes carrying node_data_a and
        node_data_b; no-op if the values are equal or either is absent."""
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
    # Demo: build the list 1..5 (pushed in reverse so it prints in order),
    # then swap the values 1 and 4 and print again.
    # NOTE(review): these call sites use print_list/push/swap_nodes, but the
    # mangled class above defines all of its methods under the single name
    # `_UpperCAmelCase` — the method names must be restored for this to run.
    SCREAMING_SNAKE_CASE_ = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('After swapping')
    ll.print_list()
| 189 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowercase ( PipelineTool ):
    """Agent tool wrapping CLIPSeg to produce a binary segmentation mask of an
    image for a text label.

    NOTE(review): reconstructed from a mangled original in which the base
    class was the undefined name ``A__`` (PipelineTool is imported from
    .base above), all class attributes shared the name
    ``SCREAMING_SNAKE_CASE__`` (shadowing each other), the methods were all
    named ``__magic_name__``, and the decode thresholding / ``np.uinta`` were
    garbled. Attribute and method names are restored to the PipelineTool
    contract (description / default_checkpoint / name / model_class /
    inputs / outputs and encode / forward / decode).
    """
    description = (
        '''This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'''
        '''It takes two arguments named `image` which should be the original image, and `label` which should be a text '''
        '''describing the elements what should be identified in the segmentation mask. The tool returns the mask.'''
    )
    default_checkpoint = '''CIDAS/clipseg-rd64-refined'''
    name = '''image_segmenter'''
    model_class = CLIPSegForImageSegmentation
    inputs = ['''image''', '''text''']
    outputs = ['''image''']
    def __init__(self, *args, **kwargs):
        """Require the vision backend (Pillow) before initializing the tool."""
        requires_backends(self, ['''vision'''])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", label: str):
        """Preprocess the (image, label) pair into model-ready tensors."""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors='''pt''')
    def forward(self, inputs):
        """Run CLIPSeg without gradients and return the raw logits."""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self, outputs):
        """Threshold the logits into a binary mask and return it as a PIL image."""
        array = outputs.cpu().detach().numpy()
        # Binarize: non-positive logits -> background (0), positive -> mask (1).
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 9 |
from __future__ import annotations
from cmath import sqrt
def _a ( UpperCamelCase_ : int , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> tuple[complex, complex]:
"""simple docstring"""
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
lowerCAmelCase__ = b * b - 4 * a * c
lowerCAmelCase__ = (-b + sqrt(UpperCamelCase_ )) / (2 * a)
lowerCAmelCase__ = (-b - sqrt(UpperCamelCase_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def _a ( ) -> Optional[Any]:
    """Demo entry point: solve 5*x**2 + 6*x + 1 = 0 and print the roots.

    NOTE(review): this block is machine-mangled and cannot run as-is — it
    calls `quadratic_roots`, but the solver above was renamed `_a` (which
    this very definition now shadows); the print reads `solutiona` twice
    while the results were bound to throwaway locals; and the __main__ guard
    calls an undefined `main`.
    """
    lowerCAmelCase__ , lowerCAmelCase__ = quadratic_roots(a=5 , b=6 , c=1 )
    print(F"The solutions are: {solutiona} and {solutiona}" )
if __name__ == "__main__":
    main()
| 340 | 0 |
from ... import PretrainedConfig
# Map of known NEZHA checkpoints to their hosted config files.
# NOTE(review): the class below reads NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP,
# but this constant was mangled to `__A` (which is additionally subject to
# name-mangling inside a class body) — restore the original constant name.
__A = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}
class _SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''Configuration for the NEZHA model.

    NOTE(review): reconstructed from a mangled original in which the base
    class was an undefined name (PretrainedConfig is imported above), both
    class attributes were bound to `lowercase_` (the archive-map reference
    was additionally undefined), and every ``__init__`` parameter was named
    ``UpperCAmelCase_`` (a SyntaxError). Parameter names and defaults are
    restored from the attribute assignments in the original body; confirm
    against the upstream NezhaConfig.
    '''
    # Model identifier — the final value the duplicated class attribute held.
    lowercase_ = "nezha"
    def __init__(
        self,
        vocab_size=21_128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        '''Store the NEZHA hyper-parameters; token ids and extra kwargs are
        forwarded to PretrainedConfig.'''
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
| 273 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for ``CTRLTokenizer`` using a tiny BPE vocabulary."""

    # Attribute names are the contract expected by TokenizerTesterMixin.
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        """Write a minimal vocab/merges pair into the mixin's tmp dir."""
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        """Return a tokenizer loaded from the fixture files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Input/expected-detokenized pair used by the common round-trip tests."""
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        """BPE-tokenize a sentence and check tokens and their ids."""
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 273 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data):
    """Split a sklearn-style dataset mapping into (features, targets)."""
    return (data["data"], data["target"])
def xgboost(features, target, test_features):
    """Fit an XGBoost regressor and return column-vector predictions for *test_features*."""
    # Fixed random_state keeps the fit reproducible across runs.
    xgb = XGBRegressor(verbosity=0, random_state=4_2)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main():
    """Fetch California housing data, train XGBoost, and report test errors."""
    housing = fetch_california_housing()
    data, target = data_handling(housing)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    # Run doctests first, then the full training/evaluation pipeline.
    import doctest

    doctest.testmod(verbose=True)
    main()
| 19 | import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Initialising the CNN
    # (Sequential - building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    # Model expects a batch dimension: (1, 64, 64, 3).
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        prediction = "Normal"
    if result[0][0] == 1:
        prediction = "Abnormality detected"
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Re-key an LDM/Stable-Diffusion VAE state dict to diffusers' AutoencoderKL layout.

    :param checkpoint: the original VAE ``state_dict``.
    :param config: the diffusers VAE config (forwarded to ``assign_to_checkpoint``).
    :return: a new state dict whose keys match ``AutoencoderKL``.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            # pop() so the downsampler weights are not re-matched by the path renamer.
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        # LDM decoder blocks are numbered bottom-up; diffusers numbers them top-down.
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable-Diffusion VAE ``.pt``/``.safetensors`` file to a diffusers AutoencoderKL.

    Downloads the v1 inference YAML to build the VAE config, remaps the state
    dict, and saves the resulting model to *output_path*.
    """
    # Only support SD v1 layout (config fetched from the CompVis repo).
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    # CLI entry point: convert a VAE checkpoint to diffusers format.
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 145 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """Project Euler 174: count hollow square laminae buildable in 1..n_limit ways.

    For every lamina with at most *t_limit* tiles, tally how many outer/hole
    width pairs produce each tile count, then count tile totals achievable in
    between 1 and *n_limit* distinct ways.
    """
    # tile_count -> number of (outer_width, hole_width) pairs producing it
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # Smallest hole keeping the tile count within t_limit.
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # Hole width must have the same parity as the outer width.
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
    # Print the Project Euler answer using f-string debug syntax.
    print(F'{solution() = }')
| 145 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
_lowerCAmelCase : Any = "bert-base-cased"
_lowerCAmelCase : List[Any] = "fp16"
_lowerCAmelCase : int = "bf16"
_lowerCAmelCase : Tuple = [FPaa, BFaa]
@require_fsdp
@require_cuda
class _UpperCamelCase ( lowerCAmelCase ):
    """Unit tests mapping FSDP environment variables onto FullyShardedDataParallelPlugin.

    NOTE(review): the local names in this class look machine-mangled — every
    assignment target is ``UpperCAmelCase__`` and several loop/env arguments are
    ``lowerCamelCase``, so values such as the env dict built in ``setUp``
    (presumably ``self.dist_env``) and the per-test env-var keys (presumably
    ``env["FSDP_*"] = ...``) are clobbered. Compare with the upstream
    `accelerate` FSDP test suite before trusting these tests to run.
    """

    def UpperCAmelCase_ ( self :Optional[Any] ) -> int:
        # Base single-process distributed env that enables the FSDP code path.
        super().setUp()

        UpperCAmelCase__ = dict(
            ACCELERATE_USE_FSDP="true" , MASTER_ADDR="localhost" , MASTER_PORT="10999" , RANK="0" , LOCAL_RANK="0" , WORLD_SIZE="1" , )

    def UpperCAmelCase_ ( self :Union[str, Any] ) -> List[str]:
        # Each sharding-strategy name should map to ShardingStrategy(i + 1).
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(lowerCamelCase ):
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = f'''{i + 1}'''
            UpperCAmelCase__ = strategy
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy , ShardingStrategy(i + 1 ) )

    def UpperCAmelCase_ ( self :str ) -> List[Any]:
        # Backward-prefetch policy: "NO_PREFETCH" disables it, otherwise enum by index.
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(lowerCamelCase ):
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = prefetch_policy
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch )
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch , BackwardPrefetch(i + 1 ) )

    def UpperCAmelCase_ ( self :Optional[int] ) -> List[str]:
        # State-dict type; FULL_STATE_DICT should also enable CPU offload + rank0-only.
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(lowerCamelCase ):
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = state_dict_type
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type , StateDictType(i + 1 ) )
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu )
                    self.assertTrue(fsdp_plugin.state_dict_config.ranka_only )

    def UpperCAmelCase_ ( self :Optional[int] ) -> Tuple:
        # Auto-wrap policies: transformer-based, size-based, and NO_WRAP;
        # an unknown transformer layer class name must raise.
        UpperCAmelCase__ = AutoModel.from_pretrained(lowerCamelCase )
        for policy in FSDP_AUTO_WRAP_POLICY:
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                UpperCAmelCase__ = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                UpperCAmelCase__ = "2000"
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase )
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy )
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy )

        UpperCAmelCase__ = self.dist_env.copy()
        UpperCAmelCase__ = "TRANSFORMER_BASED_WRAP"
        UpperCAmelCase__ = "T5Layer"
        with mockenv_context(**lowerCamelCase ):
            UpperCAmelCase__ = FullyShardedDataParallelPlugin()
            with self.assertRaises(lowerCamelCase ) as cm:
                fsdp_plugin.set_auto_wrap_policy(lowerCamelCase )
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception ) )

        UpperCAmelCase__ = self.dist_env.copy()
        UpperCAmelCase__ = "SIZE_BASED_WRAP"
        UpperCAmelCase__ = "0"
        with mockenv_context(**lowerCamelCase ):
            UpperCAmelCase__ = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(lowerCamelCase )
            self.assertIsNone(fsdp_plugin.auto_wrap_policy )

    def UpperCAmelCase_ ( self :int ) -> Any:
        # Mixed precision: fp16 should create a ShardedGradScaler, bf16 should not.
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = mp_dtype
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = Accelerator()
                if mp_dtype == "fp16":
                    UpperCAmelCase__ = torch.floataa
                elif mp_dtype == "bf16":
                    UpperCAmelCase__ = torch.bfloataa
                UpperCAmelCase__ = MixedPrecision(param_dtype=lowerCamelCase , reduce_dtype=lowerCamelCase , buffer_dtype=lowerCamelCase )
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy , lowerCamelCase )
                if mp_dtype == FPaa:
                    self.assertTrue(isinstance(accelerator.scaler , lowerCamelCase ) )
                elif mp_dtype == BFaa:
                    self.assertIsNone(accelerator.scaler )
                # Reset the singleton so the next loop iteration starts clean.
                AcceleratorState._reset_state(lowerCamelCase )

    def UpperCAmelCase_ ( self :List[Any] ) -> str:
        # CPU offload flag should round-trip through the env var.
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            UpperCAmelCase__ = self.dist_env.copy()
            UpperCAmelCase__ = str(lowerCamelCase ).lower()
            with mockenv_context(**lowerCamelCase ):
                UpperCAmelCase__ = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload , CPUOffload(offload_params=lowerCamelCase ) )
@require_fsdp
@require_multi_gpu
@slow
class _UpperCamelCase ( lowerCAmelCase ):
    """Slow multi-GPU integration tests launching FSDP training via ``accelerate launch``.

    NOTE(review): as in the class above, the mangled ``UpperCAmelCase__``
    assignment targets clobber what were presumably ``self.*`` attributes in
    ``setUp`` and ``cmd_config``/``self.test_file_path`` locals in the test
    methods — confirm against the upstream `accelerate` test suite.
    """

    def UpperCAmelCase_ ( self :List[Any] ) -> Union[str, Any]:
        # Thresholds, launch configs, and location of the external test scripts.
        super().setUp()
        UpperCAmelCase__ = 0.82
        UpperCAmelCase__ = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        UpperCAmelCase__ = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        UpperCAmelCase__ = 160
        UpperCAmelCase__ = 160
        UpperCAmelCase__ = inspect.getfile(accelerate.test_utils )
        UpperCAmelCase__ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "external_deps"] )

    def UpperCAmelCase_ ( self :Optional[int] ) -> str:
        # Launch test_performance.py for each performance config, translating the
        # config name into --fsdp_* CLI flags.
        UpperCAmelCase__ = os.path.join(self.test_scripts_folder , "test_performance.py" )
        UpperCAmelCase__ = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            UpperCAmelCase__ = cmd.copy()
            for i, strategy in enumerate(lowerCamelCase ):
                if strategy.lower() in config:
                    cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no" )
            else:
                cmd_config.append("--mixed_precision=fp16" )

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True" )

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000" )

            cmd_config.extend(
                [
                    self.test_file_path,
                    f'''--output_dir={self.tmpdir}''',
                    f'''--performance_lower_bound={self.performance_lower_bound}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(lowerCamelCase , env=os.environ.copy() )

    def UpperCAmelCase_ ( self :Any ) -> List[str]:
        # Checkpoint save/resume round-trip for every FSDP state-dict type
        # (only exercised under FULL_SHARD).
        UpperCAmelCase__ = os.path.join(self.test_scripts_folder , "test_checkpointing.py" )
        UpperCAmelCase__ = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]
        for i, strategy in enumerate(lowerCamelCase ):
            UpperCAmelCase__ = cmd.copy()
            cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
            if strategy != "FULL_SHARD":
                continue
            UpperCAmelCase__ = len(lowerCamelCase )
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                UpperCAmelCase__ = cmd_config[:state_dict_config_index]
                cmd_config.append(f'''--fsdp_state_dict_type={state_dict_type}''' )
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f'''--output_dir={self.tmpdir}''',
                        "--partial_train_epoch=1",
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(lowerCamelCase , env=os.environ.copy() )

                UpperCAmelCase__ = cmd_config[:-1]
                UpperCAmelCase__ = os.path.join(self.tmpdir , "epoch_0" )
                cmd_config.extend(
                    [
                        f'''--resume_from_checkpoint={resume_from_checkpoint}''',
                    ] )
                with patch_environment(omp_num_threads=1 ):
                    execute_subprocess_async(lowerCamelCase , env=os.environ.copy() )

    def UpperCAmelCase_ ( self :Union[str, Any] ) -> int:
        # Peak-memory regression check for each spec in
        # self.peak_memory_usage_upper_bound (multi_gpu specs skip FSDP flags).
        UpperCAmelCase__ = os.path.join(self.test_scripts_folder , "test_peak_memory_usage.py" )
        UpperCAmelCase__ = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            UpperCAmelCase__ = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"] )
            else:
                cmd_config.extend(["--mixed_precision=no"] )

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"] )
                for i, strategy in enumerate(lowerCamelCase ):
                    if strategy.lower() in spec:
                        cmd_config.append(f'''--fsdp_sharding_strategy={i+1}''' )
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True" )

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f'''--fsdp_auto_wrap_policy={policy}''' )
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer" )
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000" )

            cmd_config.extend(
                [
                    self.test_file_path,
                    f'''--output_dir={self.tmpdir}''',
                    f'''--peak_memory_upper_bound={peak_mem_upper_bound}''',
                    f'''--n_train={self.n_train}''',
                    f'''--n_val={self.n_val}''',
                ] )
            with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(lowerCamelCase , env=os.environ.copy() )
| 169 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class _UpperCamelCase ( FlaxModelTesterMixin , unittest.TestCase ):
    """Flax AutoencoderKL model tests driven by ``FlaxModelTesterMixin``."""

    # The mixin reads ``model_class`` to know which model to instantiate.
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        """A deterministic 4x3x32x32 uniform sample plus its PRNG key."""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        """Return (init kwargs, forward inputs) for the shared mixin tests."""
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 169 | 1 |
'''simple docstring'''
from __future__ import annotations
def a(maze: list[list[int]]) -> bool:
    """Solve a square maze with backtracking and print the path (or a failure note).

    :param maze: square grid where 1 marks a blocked cell and 0 a free cell.
    :return: True when a path from (0, 0) to (size-1, size-1) exists.
    """
    size = len(maze)
    # Solution grid: cells on the found path are marked with 1.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Recursive backtracking step: try to extend the path through cell (i, j).

    Marks visited cells in *solutions* and unmarks them when a branch fails.
    :return: True when the bottom-right cell is reachable from (i, j).
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            # Dead end: backtrack.
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod() | 219 |
'''simple docstring'''
from __future__ import annotations
# Sieve of Eratosthenes up to 1_000_000: seive[n] is True when n is prime.
# (Indices 0 and 1 are never cleared; is_prime is only meaningful for n >= 2.)
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1
def is_prime(n: int) -> bool:
    """Return True when *n* is prime, via the precomputed sieve (valid for n <= 1_000_000)."""
    return seive[n]
def contains_an_even_digit(number: int) -> bool:
    """Return True when any decimal digit of *number* is even."""
    return any(digit in '''02468''' for digit in str(number))
def find_circular_primes(limit: int = 1000000) -> list[int]:
    """Return all circular primes below *limit* (every digit rotation is prime).

    Any prime containing an even digit has a rotation that is even, so those
    candidates are filtered out before the rotation check.
    """
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result
def a() -> int:
    """Return the number of circular primes below one million (Project Euler 35)."""
    return len(find_circular_primes())
if __name__ == "__main__":
print(F"""{len(find_circular_primes()) = }""") | 219 | 1 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowercase = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification model and copy the S3PRL head weights into it."""
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['projector.weight']
    model.projector.bias.data = downstream_dict['projector.bias']
    model.classifier.weight.data = downstream_dict['model.post_net.linear.weight']
    model.classifier.bias.data = downstream_dict['model.post_net.linear.bias']
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification model and copy the S3PRL head weights into it."""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict['model.linear.weight']
    model.classifier.bias.data = downstream_dict['model.linear.bias']
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an XVector model and copy the S3PRL TDNN/utterance-level head weights into it."""
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict['connector.weight']
    model.projector.bias.data = downstream_dict['connector.bias']
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']

    model.feature_extractor.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight']
    model.feature_extractor.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias']
    model.classifier.weight.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight']
    model.classifier.bias.data = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias']
    model.objective.weight.data = downstream_dict['objective.W']
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL downstream checkpoint to a HuggingFace model + feature extractor.

    Dispatches on the architecture declared in the config and saves both the
    converted model and the feature extractor to *model_dump_path*.
    """
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    downstream_dict = checkpoint['Downstream']

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)

    arch = hf_config.architectures[0]
    if arch.endswith('ForSequenceClassification'):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForAudioFrameClassification'):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith('ForXVector'):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint['Featurizer']['weights']

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point: convert an S3PRL checkpoint into a HuggingFace dump.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 178 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __UpperCAmelCase ( a_ , a_ , a_ , a_ , a_):
snake_case_ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(a_)])
snake_case_ = np.array(a_)
snake_case_ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , a_)) , x.transpose()) , a_)
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    """Forecast the next user count with a weekly-seasonal SARIMAX model.

    Uses *train_match* as the exogenous regressor and returns the first
    out-of-sample prediction.
    """
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)  # weekly seasonality
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order)
    model_fit = model.fit(disp=False, maxiter=6_00, method='nm')
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    """RBF-kernel SVR forecast: fit on (x_train, train_user), predict the first test row."""
    regressor = SVR(kernel='rbf', C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    """Return a lower outlier limit: Q1 minus 10% of the interquartile range.

    Note: sorts *train_user* in place.
    """
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def __UpperCAmelCase(list_vote, actual_result):
    """Majority-vote check of whether the forecasts agree with reality.

    A vote is "safe" when it does not exceed ``actual_result`` and its
    magnitude is within 0.1 of it; returns True when safe votes outnumber
    unsafe ones.

    NOTE(review): the obfuscated signature repeated `a_` twice (a SyntaxError)
    and never bound `safe`/`not_safe`; both restored from the increments and
    the return expression.
    """
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe += 1
        elif abs(abs(i) - abs(actual_result)) <= 0.1:
            safe += 1
        else:
            not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # NOTE(review): every binding in this script was collapsed to `lowercase`
    # while the later lines read descriptive names; bindings restored from
    # those reads.  The four forecaster functions are all named
    # `__UpperCAmelCase` in this file, so the calls below still need the
    # definitions renamed to match -- confirm against the upstream script.
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()
    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]
    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]
    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]
    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]
    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user) else "not "
    # BUG FIX: the source printed the literal "{not_str}" -- the f-prefix was
    # missing.
    print(f"Today's data is {not_str}safe.")
| 178 | 1 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Map of pretrained Mask2Former checkpoints to their hosted config files.
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/mask2former-swin-small-coco-instance''': (
        '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# NOTE(review): both module constants were bound to `A_` by the obfuscation,
# so the second assignment clobbered the first; `logger` is the name the
# class below actually reads, and the map name follows the upstream file --
# confirm.
logger = logging.get_logger(__name__)
class __A ( a ):
    """Configuration for a Mask2Former model.

    Bundles the backbone configuration with the transformer-decoder,
    pixel-decoder and loss hyper-parameters; defaults mirror the original
    signature.

    NOTE(review): the obfuscated source gave every ``__init__`` parameter the
    same name (a SyntaxError) and dropped the ``self.`` targets of the
    attribute assignments; both are restored from how the values are consumed
    in the method bodies and, where the body gives no hint, from the upstream
    Mask2Former configuration -- confirm against it.
    """

    # Class metadata read via `self.__class__.model_type`,
    # `self.backbones_supported` and PretrainedConfig's attribute mapping.
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config=None,
        feature_size=256,
        mask_feature_size=256,
        hidden_dim=256,
        encoder_feedforward_dim=1024,
        activation_function="relu",
        encoder_layers=6,
        decoder_layers=10,
        num_attention_heads=8,
        dropout=0.0,
        dim_feedforward=2048,
        pre_norm=False,
        enforce_input_projection=False,
        common_stride=4,
        ignore_value=255,
        num_queries=100,
        no_object_weight=0.1,
        class_weight=2.0,
        mask_weight=5.0,
        dice_weight=5.0,
        train_num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        init_std=0.02,
        init_xavier_std=1.0,
        use_auxiliary_loss=True,
        feature_strides=[4, 8, 16, 32],  # noqa: B006 -- kept for interface compatibility
        output_auxiliary_logits=None,
        **kwargs,
    ):
        """Build the config; falls back to a default Swin backbone when none is given."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,  # NOTE(review): value lost in obfuscation; upstream uses False
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            # A plain dict is re-hydrated into the matching config class.
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # NOTE(review): the source assigns `decoder_layers` a second time
        # here; upstream stores it as `num_hidden_layers` -- confirm.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Instantiate a config from an existing backbone configuration.

        NOTE(review): method name restored from the upstream Mask2Former
        config; the obfuscated source collapsed it to `__lowercase`.
        """
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, flattening the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Map of pretrained Mask2Former checkpoints to their hosted config files.
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''facebook/mask2former-swin-small-coco-instance''': (
        '''https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json'''
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
# NOTE(review): both module constants were bound to `A_` by the obfuscation,
# so the second assignment clobbered the first; `logger` is the name the
# class below actually reads, and the map name follows the upstream file --
# confirm.
logger = logging.get_logger(__name__)
class __A ( a ):
    """Configuration for a Mask2Former model.

    Bundles the backbone configuration with the transformer-decoder,
    pixel-decoder and loss hyper-parameters; defaults mirror the original
    signature.

    NOTE(review): the obfuscated source gave every ``__init__`` parameter the
    same name (a SyntaxError) and dropped the ``self.`` targets of the
    attribute assignments; both are restored from how the values are consumed
    in the method bodies and, where the body gives no hint, from the upstream
    Mask2Former configuration -- confirm against it.
    """

    # Class metadata read via `self.__class__.model_type`,
    # `self.backbones_supported` and PretrainedConfig's attribute mapping.
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config=None,
        feature_size=256,
        mask_feature_size=256,
        hidden_dim=256,
        encoder_feedforward_dim=1024,
        activation_function="relu",
        encoder_layers=6,
        decoder_layers=10,
        num_attention_heads=8,
        dropout=0.0,
        dim_feedforward=2048,
        pre_norm=False,
        enforce_input_projection=False,
        common_stride=4,
        ignore_value=255,
        num_queries=100,
        no_object_weight=0.1,
        class_weight=2.0,
        mask_weight=5.0,
        dice_weight=5.0,
        train_num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        init_std=0.02,
        init_xavier_std=1.0,
        use_auxiliary_loss=True,
        feature_strides=[4, 8, 16, 32],  # noqa: B006 -- kept for interface compatibility
        output_auxiliary_logits=None,
        **kwargs,
    ):
        """Build the config; falls back to a default Swin backbone when none is given."""
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,  # NOTE(review): value lost in obfuscation; upstream uses False
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            # A plain dict is re-hydrated into the matching config class.
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. '
                f'Supported model types: {",".join(self.backbones_supported)}')
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        # NOTE(review): the source assigns `decoder_layers` a second time
        # here; upstream stores it as `num_hidden_layers` -- confirm.
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Instantiate a config from an existing backbone configuration.

        NOTE(review): method name restored from the upstream Mask2Former
        config; the obfuscated source collapsed it to `__lowercase`.
        """
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        """Serialize to a plain dict, flattening the nested backbone config."""
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
a : Union[str, Any] = ''''''
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
class __UpperCamelCase ( tr.AbstractTransform ):
    """jiwer transform that flattens sentences into a list of characters.

    NOTE(review): the obfuscated source dropped the ``self.`` target in
    ``__init__`` and collapsed both method names to ``__a`` (the second
    definition shadowed the first even though the body calls
    ``self.process_string``); names restored from that usage and the jiwer
    AbstractTransform convention -- confirm.
    """

    def __init__(self, sentence_delimiter=" "):
        # Character appended between consecutive sentences ("" disables it).
        self.sentence_delimiter = sentence_delimiter

    def process_string(self, s):
        # One sentence -> list of its characters.
        return list(s)

    def process_list(self, inp):
        chars = []
        for sent_idx, sentence in enumerate(inp):
            chars.extend(self.process_string(sentence))
            # Insert the delimiter between sentences, never after the last.
            if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                chars.append(self.sentence_delimiter)
        return chars
# NOTE(review): the binding names in this if/else were collapsed by the
# obfuscation (both branches assign to `a`, and `SENTENCE_DELIMITER` /
# `SentencesToListOfCharacters` are likewise not bound under those names in
# this file).  Judging by the upstream CER metric this Compose is the
# character-level transform fed to jiwer.compute_measures -- confirm.
a : Optional[int] = tr.Compose(
    [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    # Newer jiwer versions provide the sentence-reduction transforms natively.
    a : Any = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
a : Tuple = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
a : Any = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
a : Optional[Any] = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
    """Character Error Rate (CER) metric backed by jiwer.

    NOTE(review): the obfuscated source collapsed both method names to `__a`
    (so the second definition shadowed the first) and never initialized
    `incorrect`/`total`; names restored from usage and from the hook names
    `datasets.Metric` dispatches to (`_info`/`_compute`).
    """

    def _info(self):
        # Metadata consumed by the datasets library.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        # NOTE(review): `cer_transform` is the module-level character
        # transform whose binding name was lost to obfuscation -- confirm.
        # jiwer.compute_measures expects (truth, hypothesis) ordering.
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]
        return incorrect / total
| 105 |
"""simple docstring"""
import heapq as hq
import math
from collections.abc import Iterator
class __UpperCamelCase :
    """A vertex of a weighted undirected graph, used by Prim's algorithm.

    NOTE(review): the obfuscated source dropped every ``self.`` target in
    ``__init__`` and collapsed both helper method names to ``__a``; all names
    are restored from how the Prim functions below read them
    (``u.key``, ``u.pi``, ``u.neighbors``, ``u.edges[v.id]``,
    ``.add_neighbor``, ``.add_edge``).
    """

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None        # best known edge weight linking this vertex to the tree
        self.pi = None         # parent vertex in the minimum spanning tree
        self.neighbors = []
        self.edges = {}        # {neighbor vertex id: edge weight}

    def __lt__(self, other):
        # Ordering by key lets min()/heapq pick the cheapest frontier vertex.
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight
def _SCREAMING_SNAKE_CASE(graph, a, b, edge):
    """Create an undirected weighted edge between vertices ``a`` and ``b``.

    ``a``/``b`` are 1-based indices into ``graph``; ``edge`` is the weight.

    NOTE(review): the obfuscated signature repeated one parameter name four
    times (a SyntaxError); names restored from the body's usage.
    """
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)
def _SCREAMING_SNAKE_CASE(graph, root):
    """Prim's minimum-spanning-tree (list version).

    Returns ``[(child_id + 1, parent_id + 1), ...]`` for every vertex except
    ``graph[0]``.

    NOTE(review): the obfuscated body dropped the ``u.key``/``u.pi``/
    ``root.key`` targets; restored from how the loop reads them.
    """
    ans = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        # Cheapest frontier vertex (Vertex.__lt__ compares keys).
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        ans.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return ans
def _SCREAMING_SNAKE_CASE(graph, root):
    """Prim's minimum-spanning-tree (min-heap version).

    Yields ``(child_id + 1, parent_id + 1)`` for every vertex except
    ``graph[0]``.

    NOTE(review): the obfuscated body dropped the ``u.key``/``u.pi``/
    ``root.key``/``v.pi``/``v.key`` targets; restored from how the loop reads
    them.
    """
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    h = list(graph)
    hq.heapify(h)
    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                # Keys changed, so the heap invariant must be re-established.
                hq.heapify(h)
    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)
# NOTE(review): empty placeholder left by the obfuscation pass; the body is
# only a docstring, so calling it does nothing and returns None.
def _SCREAMING_SNAKE_CASE ( ) ->None:
    '''simple docstring'''
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 105 | 1 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __a ( OnnxPipelineTesterMixin , unittest.TestCase ):
    """Fast-test hookup for the ONNX Stable-Diffusion inpainting pipeline.

    NOTE(review): the base class was obfuscated to `_SCREAMING_SNAKE_CASE`, a
    name never defined in this file; restored to the mixin imported above.
    """

    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __a ( unittest.TestCase ):
    """Nightly, GPU-only integration tests for ONNX Stable-Diffusion inpainting.

    NOTE(review): the obfuscated source collapsed every local, both property
    names and both test-method names; names below are restored from how the
    values are consumed (`self.gpu_provider`, `self.gpu_options`,
    `output.images`, ...) and, for the method names, from the upstream
    diffusers test file -- confirm.  `test_` prefixes are required for
    unittest discovery.
    """

    @property
    def gpu_provider(self):
        # onnxruntime CUDA execution provider pinned to a 15 GB arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the attribute target of this `False` was lost in the
        # obfuscation; upstream disables memory-pattern optimization -- confirm.
        options.enable_mem_pattern = False
        return options

    def test_inpainting(self):
        """Default scheduler, 10 steps: check a 3x3 slice of the output."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        """LMS discrete scheduler, 20 steps: check a 3x3 slice of the output."""
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A red cat sitting on a park bench"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
# Undirected demo graph; binding name restored from the `demo_graph`
# references in the __main__ block below (the obfuscation had bound it to a
# throwaway name).
demo_graph = {
    '''A''': ['''B''', '''C''', '''E'''],
    '''B''': ['''A''', '''D''', '''E'''],
    '''C''': ['''A''', '''F''', '''G'''],
    '''D''': ['''B'''],
    '''E''': ['''A''', '''B''', '''D'''],
    '''F''': ['''C'''],
    '''G''': ['''C'''],
}
def SCREAMING_SNAKE_CASE(graph, start, goal):
    """Breadth-first search for one shortest path from ``start`` to ``goal``.

    Returns the path as a list of nodes, ``[start]`` when start == goal, and
    ``[]`` when no path exists.

    NOTE(review): local names were collapsed by the obfuscation; restored
    from how each value is consumed.
    """
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def SCREAMING_SNAKE_CASE(graph, start, target):
    """Breadth-first search returning the shortest-path length start->target.

    Returns 0 when start == target and -1 when either node is missing or
    unreachable.

    NOTE(review): local names were collapsed by the obfuscation; restored
    from how each value is consumed.
    """
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(queue)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
# NOTE(review): as written this block raises NameError -- `bfs_shortest_path`
# and `bfs_shortest_path_distance` are not bound under these names anywhere
# in this file (both functions above were renamed `SCREAMING_SNAKE_CASE` by
# the obfuscation, and `demo_graph` was bound to `lowerCamelCase`).
if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, '''G''', '''D'''))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, '''G''', '''D'''))  # returns 4
# Precomputed sum of squared decimal digits for every 5-digit chunk
# (0 .. 99_999); binding name restored from the lookup in the function below.
DIGITS_SQUARED = [sum(int(c) ** 2 for c in str(i)) for i in range(100_000)]
def _a(number: int) -> int:
    """Return the sum of the squares of the decimal digits of ``number``.

    Uses the module-level ``DIGITS_SQUARED`` table to process five digits per
    iteration.

    NOTE(review): the obfuscated body initialized the accumulator under a
    throwaway name while incrementing `sum_of_digits_squared`, which was
    unbound; restored from the increment and the return statement.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
# CHAINS[i] caches whether the chain starting at i + 1 terminates at 1 (True)
# or at 89 (False); None means not yet computed.
CHAINS = [None] * 10_000_000
# NOTE(review): the obfuscation dropped the subscript targets of these two
# seeds; per the comment above, 1 -> True and 58 -> False -- confirm.
CHAINS[0] = True
CHAINS[57] = False
def _a(number: int) -> bool:
    """Return True if the digit-square chain from ``number`` ends at 1, False if at 89.

    Results are memoized in the module-level ``CHAINS`` array (indexed by
    ``number - 1``).

    NOTE(review): the recursive calls are written against `chain` and
    `next_number`, which are not bound under those names in this file (every
    function was renamed `_a` by the obfuscation); kept as in the source.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    # Appending zeros never changes the digit-square sum, so every
    # number * 10**k below the limit shares this result.
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def _a(number: int = 10_000_000) -> int:
    """Count starting values below ``number`` whose digit-square chain reaches 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    # NOTE(review): the obfuscated return counted the function argument
    # itself, which can never appear in a True/False/None array; chains that
    # arrive at 89 are cached as False (see the seeds above) -- confirm.
    return CHAINS[:number].count(False)
# Run doctests, then print the Project Euler answer.
# NOTE(review): `solution` is not bound under that name in this file (the
# functions above are all named `_a`), so the print raises NameError as-is.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(f"""{solution() = }""")
| 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
# Demo inputs for the merge in __main__; both tuples were bound to the same
# throwaway name by the obfuscation while the __main__ block reads
# `test_data_odd` / `test_data_even` -- names restored from that usage.
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class _lowerCamelCase:
    """One node of a singly linked list.

    NOTE(review): the obfuscation gave both fields the same name, which left
    the dataclass with a single field; names restored from the
    ``node.data`` / ``node.next_node`` reads in the linked-list class below.
    """

    data: int
    next_node: Node | None
class _lowerCamelCase:
    """Singly linked list that stores the given integers in ascending order.

    NOTE(review): the obfuscation dropped the ``self.head`` targets and
    replaced ``reverse=True`` with an unbound name; restored from how
    ``__iter__`` walks the list (inserting at the head in descending order
    yields an ascending list).
    """

    def __init__(self, ints) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def UpperCamelCase_(list_a, list_b) -> SortedLinkedList:
    """Merge two iterables of ints into one ascending ``SortedLinkedList``.

    NOTE(review): the obfuscated signature used one name for both parameters
    (a SyntaxError); distinct names restored.  ``SortedLinkedList`` itself is
    not bound under that name in this file (the class above was renamed).
    """
    return SortedLinkedList(list(list_a) + list(list_b))
# NOTE(review): this demo raises NameError as written -- `SortedLinkedList`,
# `merge_lists` and `SSL` are not bound under those names in this file; the
# obfuscation renamed the class to `_lowerCamelCase` and assigned the alias
# (presumably `SSL = SortedLinkedList`) to `SCREAMING_SNAKE_CASE` instead.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    SCREAMING_SNAKE_CASE : int = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 21 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowercase__ :List[str] = "\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
lowercase__ :str = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
lowercase__ :int = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 
'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 
'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class lowercase(datasets.Metric):
    """Google-BLEU (GLEU) metric backed by nltk's ``gleu_score.corpus_gleu``.

    NOTE(review): restored from an obfuscated version in which both methods
    were named ``A__`` (so only one survived) and the second method's
    parameters were all named ``A__`` (a SyntaxError). ``datasets.Metric``
    dispatches to ``_info`` / ``_compute``; the parameter names follow the
    ``compute(predictions=..., references=..., min_len=..., max_len=...)``
    calls shown in _KWARGS_DESCRIPTION above.
    """

    def _info(self):
        # Metric metadata; the feature spec mirrors the tokenised
        # hypothesis / nested-reference shapes used in the doc examples.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''),
                    '''references''': datasets.Sequence(
                        datasets.Sequence(datasets.Value('''string''', id='''token'''), id='''sequence'''), id='''references'''
                    ),
                }
            ),
        )

    def _compute(self, predictions, references, min_len=1, max_len=4):
        # Called by Metric.compute() after feature validation.
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references,
                hypotheses=predictions,
                min_len=min_len,
                max_len=max_len,
            )
        }
| 97 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    """Builds a tiny ALBERT config plus random inputs and checks the output
    shapes of each task head.

    NOTE(review): reconstructed from an obfuscated version where the class was
    named ``lowercase``, every method was named ``A__`` (duplicate ``__init__``
    parameter names were a SyntaxError) and the final method unpacked an
    undefined ``config_and_inputs``. The class/method names below match the
    calls made by the test class later in this file
    (``AlbertModelTester(self)``, ``create_and_check_model`` etc.).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, as one flat tuple."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        # embedding_size is deliberately not forwarded (matches the original).
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test-suite entry point for ALBERT.

    NOTE(review): reconstructed from an obfuscated version whose base classes
    were an undefined ``SCREAMING_SNAKE_CASE__``, whose class attributes all
    shared the name ``lowercase_`` and whose methods were all named ``A__``
    (so unittest would never discover them). Attribute names follow the
    ModelTesterMixin / PipelineTesterMixin contracts; ``fx_compatible`` is a
    best guess for the lone boolean attribute — confirm against upstream.
    """

    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        # AlbertForPreTraining additionally needs MLM labels and the
        # sentence-order label (see the tester's pretraining check above).
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is element 0 of the tuple returned above.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the released albert-base-v2 weights.

    NOTE(review): the obfuscated original named the class ``lowercase`` and the
    method ``A__`` (so unittest never ran it); names restored, values unchanged.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('''albert-base-v2''')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Spot-check a 3x3 slice of the hidden states against recorded values.
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 97 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Standard Hugging Face lazy-module bootstrap: symbols are imported for real
# only under TYPE_CHECKING; at runtime the module is replaced by a _LazyModule
# that resolves names on first access from _import_structure.
# NOTE(review): reconstructed — the obfuscated original assigned the structure
# dict and the modeling list to throwaway names while still passing an
# undefined `_import_structure` to _LazyModule, and dropped the
# `sys.modules[__name__]` assignment.
_import_structure = {
    "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only advertised when torch is installed.
    _import_structure["modeling_megatron_bert"] = [
        "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegatronBertForCausalLM",
        "MegatronBertForMaskedLM",
        "MegatronBertForMultipleChoice",
        "MegatronBertForNextSentencePrediction",
        "MegatronBertForPreTraining",
        "MegatronBertForQuestionAnswering",
        "MegatronBertForSequenceClassification",
        "MegatronBertForTokenClassification",
        "MegatronBertModel",
        "MegatronBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_megatron_bert import (
            MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegatronBertForCausalLM,
            MegatronBertForMaskedLM,
            MegatronBertForMultipleChoice,
            MegatronBertForNextSentencePrediction,
            MegatronBertForPreTraining,
            MegatronBertForQuestionAnswering,
            MegatronBertForSequenceClassification,
            MegatronBertForTokenClassification,
            MegatronBertModel,
            MegatronBertPreTrainedModel,
        )
else:
    import sys

    # Swap this module for a lazy proxy so heavy dependencies load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    """Exercises the index helpers hung off ``datasets.arrow_dataset.Dataset``.

    NOTE(review): reconstructed — the obfuscated original subclassed an
    undefined ``lowercase__`` (base restored to TestCase, imported above),
    named every method identically, contained duplicate lambda parameters
    (a SyntaxError) and referenced unbound locals such as ``examples`` and
    ``dset``. ``np.float32`` replaces the nonexistent ``np.floataa``.
    """

    def _create_dummy_dataset(self):
        # 30 rows named my_name-train_0 .. my_name-train_29.
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index('vecs', batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        # Inner product is maximised by the row with the largest vector (29).
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')
        dset.drop_index('vecs')

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples('vecs', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_serialization(self):
        import faiss

        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name='vecs',
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index('vecs', tmp_file.name)
            dset.load_faiss_index('vecs2', tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples('vecs2', np.ones(5, dtype=np.float32))
        self.assertEqual(examples['filename'][0], 'my_name-train_29')

    def test_drop_index(self):
        dset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name='vecs'
        )
        dset.drop_index('vecs')
        # Searching a dropped/unknown index must raise MissingIndex.
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, 'vecs2', np.ones(5, dtype=np.float32)))

    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()

            dset.add_elasticsearch_index('filename', es_client=es_client)
            scores, examples = dset.get_nearest_examples('filename', 'my_name-train_29')
            self.assertEqual(examples['filename'][0], 'my_name-train_29')
@require_faiss
class FaissIndexTest(TestCase):
    """Unit tests for ``datasets.search.FaissIndex``.

    NOTE(review): reconstructed — the obfuscated original subclassed an
    undefined name, annotated tuple-unpacking targets (a SyntaxError) and
    referenced unbound locals (``query``, ``scores``, ``queries`` ...).
    ``np.float32`` replaces the nonexistent ``np.floataa``; the expected
    exception for malformed queries is assumed to be ValueError — confirm.
    """

    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query: unit vector along axis 1 -> nearest row is 1
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries: reversed identity -> rows 4,3,2,1,0
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory='Flat')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory='LSH')
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        # string_factory and custom_index are mutually exclusive.
        with self.assertRaises(ValueError):
            _ = FaissIndex(string_factory='Flat', custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FaissIndex through a mocked fsspec filesystem.

    NOTE(review): reconstructed — the obfuscated original named the function
    and its parameter ``lowerCamelCase``/``lowerCAmelCase`` (so pytest could
    not match the fixture) while the body referenced the undefined ``mockfs``;
    the body also collapsed every local onto one name and searched with the
    fixture instead of the query vector. ``mockfs`` is assumed to be an fsspec
    mock-filesystem fixture exposing ``storage_options`` — confirm in conftest.
    """
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))  # float32: faiss's native dtype

    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest(TestCase):
    """Unit tests for ``datasets.search.ElasticSearchIndex`` with a fully
    mocked Elasticsearch client.

    NOTE(review): reconstructed — the obfuscated original subclassed an
    undefined name, annotated tuple-unpacking targets (a SyntaxError) and
    referenced unbound locals (``query``, ``scores``, ``queries`` ...).
    """

    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch('elasticsearch.Elasticsearch.search') as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create'
        ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk') as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(['foo', 'bar', 'foobar'])

            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a__: int = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer test suite for ALBERT (slow and fast tokenizers).

    NOTE(review): reconstructed — the obfuscated original mixed in an
    undefined ``UpperCamelCase__`` (restored to TokenizerTesterMixin, imported
    above), gave all five class attributes one shared name and all methods one
    shared name. ``self.test_rust_tokenizer`` is read below, grounding the
    attribute names; ``a__`` is this module's sample SentencePiece vocab path.
    The trailing @slow integration method of this class is left untouched.
    """

    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(a__)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = '''this is a test'''
        output_text = '''this is a test'''
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = '''<pad>'''
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '''<pad>''')
        self.assertEqual(vocab_keys[1], '''<unk>''')
        self.assertEqual(vocab_keys[-1], '''▁eloquent''')
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = '''I was born in 92000, and this is falsé.'''

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(a__, keep_accents=True)

        tokens = tokenizer.tokenize('''This is a test''')
        self.assertListEqual(tokens, ['''▁this''', '''▁is''', '''▁a''', '''▁test'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize('''I was born in 92000, and this is falsé.''')
        self.assertListEqual(
            tokens, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.'''])
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        # 'é' is out-of-vocab, so decoding maps id 1 back to <unk>.
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.'''],)

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(a__)

        text = tokenizer.encode('''sequence builders''')
        text_a = tokenizer.encode('''multi-sequence build''')

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
            tokenizer.sep_token_id
        ]
@slow
def UpperCamelCase ( self ):
# fmt: off
A__ = {'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''input_ids''': [[2, 2_1970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 1_2051, 18, 17, 7103, 2153, 673, 8, 3515, 1_8684, 8, 4461, 6, 1927, 297, 8, 1_2060, 2607, 18, 13, 5, 4461, 15, 1_0538, 38, 8, 135, 15, 822, 58, 15, 993, 1_0363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 1_0641, 6, 29, 84, 2512, 2430, 782, 1_8684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 1_1712, 15, 7103, 2153, 673, 17, 2_4883, 9990, 9, 3], [2, 1_1502, 25, 1006, 20, 782, 8, 1_1809, 855, 1732, 1_9393, 1_8667, 37, 367, 2_1018, 69, 1854, 34, 1_1860, 1_9124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 1_7659, 84, 14, 1_6792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowerCamelCase,model_name='''albert-base-v2''',revision='''6b6560eaf5ff2e250b00c50f380c5389a9c2d82e''',)
| 39 |
def UpperCamelCase__(base: int, exponent: int) -> float:
    """Return ``base`` raised to a non-negative ``exponent``, recursively.

    exponent == 0 yields 1. Negative exponents would recurse forever, so the
    interactive caller below clamps with abs() and inverts afterwards.

    NOTE(review): the obfuscated original gave both parameters the same name
    (a SyntaxError) and recursed via an undefined ``power``; the recursive
    call now targets the function itself.
    """
    return base * UpperCamelCase__(base, exponent - 1) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = UpperCamelCase__(base, abs(exponent))
    if exponent < 0:  # the recursive helper only handles exponent >= 0
        result = 1 / result
    print(F"{base} to the power of {exponent} is {result}")
| 39 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''camembert-base''': '''https://huggingface.co/camembert-base/resolve/main/config.json''',
'''umberto-commoncrawl-cased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'''
),
'''umberto-wikipedia-uncased-v1''': (
'''https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'''
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration for a CamemBERT model (RoBERTa-style architecture).

    NOTE(review): reconstructed — the obfuscated original subclassed an
    undefined ``A__`` (restored to PretrainedConfig, imported above) and gave
    every __init__ parameter the same name, a SyntaxError. Parameter names
    were recovered from the attribute assignments in the body; defaults are
    unchanged. ``model_type`` replaces the meaningless ``_a`` attribute, as
    PretrainedConfig subclasses register themselves through it.
    """

    model_type = 'camembert'

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    """ONNX export configuration for CamemBERT.

    NOTE(review): reconstructed — the obfuscated original subclassed an
    undefined ``A__`` (restored to OnnxConfig, imported above), shadowed the
    config class's name, and gave the property an obfuscated name; OnnxConfig
    requires the abstract ``inputs`` property, restored here.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between the
        # batch and sequence dimensions.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
# Standard Hugging Face lazy-module bootstrap (same pattern as other model
# __init__ files): symbols import for real only under TYPE_CHECKING; at
# runtime the module is replaced by a _LazyModule resolving names on demand.
# NOTE(review): reconstructed — the obfuscated original assigned every
# structure update to a throwaway ``__snake_case`` name while still passing an
# undefined `_import_structure` to _LazyModule, and dropped the
# `sys.modules[__name__]` assignment.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image-processing symbols need vision extras (PIL).
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling symbols are only advertised when torch is installed.
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Swap this module for a lazy proxy so heavy dependencies load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __a:
    """Test helper that builds a tiny LayoutLMv3-style config plus matching
    dummy inputs (token ids, bounding boxes, pixel values, masks, labels).

    NOTE(review): identifiers in this file were auto-mangled. Every helper
    method below is named ``a__`` (so only the *last* ``def a__`` survives on
    the class), every parameter was collapsed to ``_SCREAMING_SNAKE_CASE``
    (duplicate argument names, a SyntaxError), and the ``__init__`` body
    assigns to a local ``UpperCAmelCase_`` instead of ``self.<attr>``.
    Restore the upstream names before executing this class.
    """
    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=7 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=99 ,_SCREAMING_SNAKE_CASE=36 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=512 ,_SCREAMING_SNAKE_CASE=16 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=6 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=None ,_SCREAMING_SNAKE_CASE=1_000 ,) -> List[Any]:
        # Record the model hyper-parameters; the RHS names below are the
        # upstream parameter names the mangler failed to rewrite.
        UpperCAmelCase_ : int = parent
        UpperCAmelCase_ : List[str] = batch_size
        UpperCAmelCase_ : Union[str, Any] = num_channels
        UpperCAmelCase_ : Dict = image_size
        UpperCAmelCase_ : Dict = patch_size
        UpperCAmelCase_ : Any = text_seq_length
        UpperCAmelCase_ : Optional[int] = is_training
        UpperCAmelCase_ : List[Any] = use_input_mask
        UpperCAmelCase_ : Optional[Any] = use_token_type_ids
        UpperCAmelCase_ : Dict = use_labels
        UpperCAmelCase_ : int = vocab_size
        UpperCAmelCase_ : List[str] = hidden_size
        UpperCAmelCase_ : int = num_hidden_layers
        UpperCAmelCase_ : Any = num_attention_heads
        UpperCAmelCase_ : Any = intermediate_size
        UpperCAmelCase_ : int = hidden_act
        UpperCAmelCase_ : Dict = hidden_dropout_prob
        UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
        UpperCAmelCase_ : Optional[Any] = max_position_embeddings
        UpperCAmelCase_ : Dict = type_vocab_size
        UpperCAmelCase_ : Any = type_sequence_label_size
        UpperCAmelCase_ : List[str] = initializer_range
        UpperCAmelCase_ : int = coordinate_size
        UpperCAmelCase_ : Union[str, Any] = shape_size
        UpperCAmelCase_ : List[str] = num_labels
        UpperCAmelCase_ : List[str] = num_choices
        UpperCAmelCase_ : Union[str, Any] = scope
        UpperCAmelCase_ : Dict = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        UpperCAmelCase_ : List[Any] = text_seq_length
        UpperCAmelCase_ : Optional[Any] = (image_size // patch_size) ** 2 + 1
        UpperCAmelCase_ : Optional[int] = self.text_seq_length + self.image_seq_length
    # Upstream name: prepare_config_and_inputs — builds random ids, legal
    # bboxes (x1<=x2, y1<=y2), pixel values, masks and labels, plus a config.
    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] ,self.vocab_size )
        UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] ,self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    # Swap y-coordinates so y1 <= y2.
                    UpperCAmelCase_ : int = bbox[i, j, 3]
                    UpperCAmelCase_ : List[Any] = bbox[i, j, 1]
                    UpperCAmelCase_ : Optional[Any] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    # Swap x-coordinates so x1 <= x2.
                    UpperCAmelCase_ : Any = bbox[i, j, 2]
                    UpperCAmelCase_ : Dict = bbox[i, j, 0]
                    UpperCAmelCase_ : Dict = t
        UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Dict = None
        if self.use_input_mask:
            UpperCAmelCase_ : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        UpperCAmelCase_ : List[Any] = None
        if self.use_token_type_ids:
            UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.text_seq_length] ,self.type_vocab_size )
        UpperCAmelCase_ : List[Any] = None
        UpperCAmelCase_ : Optional[int] = None
        if self.use_labels:
            UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            UpperCAmelCase_ : int = ids_tensor([self.batch_size, self.text_seq_length] ,self.num_labels )
        UpperCAmelCase_ : int = LayoutLMvaConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,coordinate_size=self.coordinate_size ,shape_size=self.shape_size ,input_size=self.image_size ,patch_size=self.patch_size ,)
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    # Upstream name: create_and_check_model — runs the base model with
    # text+image, text-only and image-only inputs and checks output shapes.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        UpperCAmelCase_ : str = LayoutLMvaModel(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        # text + image
        UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Tuple = model(
            _SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        UpperCAmelCase_ : Dict = model(pixel_values=_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(self.batch_size, self.image_seq_length, self.hidden_size) )
    # Upstream name: create_and_check_for_sequence_classification.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        UpperCAmelCase_ : Optional[int] = self.num_labels
        UpperCAmelCase_ : List[Any] = LayoutLMvaForSequenceClassification(_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : Tuple = model(
            _SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    # Upstream name: create_and_check_for_token_classification.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
        UpperCAmelCase_ : Optional[int] = self.num_labels
        UpperCAmelCase_ : List[str] = LayoutLMvaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : str = model(
            _SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,labels=_SCREAMING_SNAKE_CASE ,)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.text_seq_length, self.num_labels) )
    # Upstream name: create_and_check_for_question_answering.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
        UpperCAmelCase_ : int = LayoutLMvaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
        model.to(_SCREAMING_SNAKE_CASE )
        model.eval()
        UpperCAmelCase_ : Dict = model(
            _SCREAMING_SNAKE_CASE ,bbox=_SCREAMING_SNAKE_CASE ,pixel_values=_SCREAMING_SNAKE_CASE ,attention_mask=_SCREAMING_SNAKE_CASE ,token_type_ids=_SCREAMING_SNAKE_CASE ,start_positions=_SCREAMING_SNAKE_CASE ,end_positions=_SCREAMING_SNAKE_CASE ,)
        self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
    # Upstream name: prepare_config_and_inputs_for_common — repacks the tuple
    # from prepare_config_and_inputs into the kwargs dict the common tests use.
    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ),
        ) : Optional[int] = config_and_inputs
        UpperCAmelCase_ : Optional[Any] = {
            '''input_ids''': input_ids,
            '''bbox''': bbox,
            '''pixel_values''': pixel_values,
            '''token_type_ids''': token_type_ids,
            '''attention_mask''': input_mask,
        }
        return config, inputs_dict
@require_torch
class __a( _a , _a , unittest.TestCase ):
    """Common model tests for LayoutLMv3 (model / sequence, token
    classification / question answering heads).

    NOTE(review): identifiers were auto-mangled — all test methods are named
    ``a__`` (only the last survives under unittest), parameters are collapsed
    to ``_SCREAMING_SNAKE_CASE`` (duplicate arguments, a SyntaxError), and the
    class attributes below should be distinct names (``test_pruning``,
    ``all_model_classes``, ``pipeline_model_mapping``, ...), not five
    rebindings of ``lowerCAmelCase``. Restore upstream names before running.
    """
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = False
    lowerCAmelCase = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    lowerCAmelCase = (
        {'''document-question-answering''': LayoutLMvaForQuestionAnswering, '''feature-extraction''': LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    # Upstream name: is_pipeline_test_to_skip — always skips (see comment).
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Dict:
        # `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
        # embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
        # the sequence dimension of the text embedding only.
        # (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
        return True
    # Upstream name: setUp — builds the model tester and config tester.
    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = LayoutLMvaModelTester(self )
        UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
    # Upstream name: _prepare_for_class — expands inputs for multiple-choice
    # models and synthesizes dummy labels per head type when requested.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
        UpperCAmelCase_ : Dict = copy.deepcopy(_SCREAMING_SNAKE_CASE )
        if model_class in get_values(_SCREAMING_SNAKE_CASE ):
            UpperCAmelCase_ : str = {
                k: v.unsqueeze(1 ).expand(-1 ,self.model_tester.num_choices ,-1 ).contiguous()
                if isinstance(_SCREAMING_SNAKE_CASE ,torch.Tensor ) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(_SCREAMING_SNAKE_CASE ):
                UpperCAmelCase_ : Dict = torch.ones(self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
            elif model_class in get_values(_SCREAMING_SNAKE_CASE ):
                # Question answering: start and end position labels.
                UpperCAmelCase_ : str = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : str = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
            elif model_class in [
                *get_values(_SCREAMING_SNAKE_CASE ),
            ]:
                UpperCAmelCase_ : int = torch.zeros(
                    self.model_tester.batch_size ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE )
            elif model_class in [
                *get_values(_SCREAMING_SNAKE_CASE ),
            ]:
                # Token classification: one label per text token.
                UpperCAmelCase_ : int = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) ,dtype=torch.long ,device=_SCREAMING_SNAKE_CASE ,)
        return inputs_dict
    # Upstream name: test_config.
    def a__ ( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()
    # Upstream name: test_model.
    def a__ ( self ) -> List[Any]:
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_model_various_embeddings — re-runs the model test
    # for each position-embedding type.
    def a__ ( self ) -> Any:
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase_ : Dict = type
            self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_for_sequence_classification.
    def a__ ( self ) -> Dict:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_for_token_classification.
    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_for_question_answering.
    def a__ ( self ) -> Optional[int]:
        UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_model_from_pretrained — smoke-loads a checkpoint.
    @slow
    def a__ ( self ) -> Any:
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : Dict = LayoutLMvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    """Load the standard COCO test-fixture image used by the integration tests.

    BUG FIX: the original bound the opened image to a mangled throwaway name
    and then returned the undefined name ``image`` (NameError); it was also
    defined under a mangled name while the tests below call ``prepare_img()``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
class __a( unittest.TestCase ):
    """Integration test: run microsoft/layoutlmv3-base on the COCO fixture
    image and compare the first hidden states to recorded reference values.

    NOTE(review): identifiers were auto-mangled (methods named ``a__``,
    call arguments collapsed to ``_SCREAMING_SNAKE_CASE``); restore upstream
    names before executing.
    """
    # Upstream name: default_image_processor (OCR disabled for fixed boxes).
    @cached_property
    def a__ ( self ) -> int:
        return LayoutLMvaImageProcessor(apply_ocr=_SCREAMING_SNAKE_CASE ) if is_vision_available() else None
    # Upstream name: test_inference_no_head.
    @slow
    def a__ ( self ) -> Tuple:
        UpperCAmelCase_ : Optional[int] = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Dict = self.default_image_processor
        UpperCAmelCase_ : int = prepare_img()
        UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''pt''' ).pixel_values.to(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = torch.tensor([[1, 2]] )
        UpperCAmelCase_ : Optional[Any] = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
        # forward pass
        UpperCAmelCase_ : Optional[int] = model(
            input_ids=input_ids.to(_SCREAMING_SNAKE_CASE ) ,bbox=bbox.to(_SCREAMING_SNAKE_CASE ) ,pixel_values=pixel_values.to(_SCREAMING_SNAKE_CASE ) ,)
        # verify the logits
        UpperCAmelCase_ : Dict = torch.Size((1, 199, 768) )
        self.assertEqual(outputs.last_hidden_state.shape ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : int = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]] ).to(_SCREAMING_SNAKE_CASE )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) ) | 235 |
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class __a( unittest.TestCase ):
    """Test helper that builds a tiny BeiT config plus random pixel values
    (and optional labels) for the Flax model tests below.

    NOTE(review): identifiers were auto-mangled — every helper method is named
    ``a__`` (only the last definition survives), the ``__init__`` parameters
    are all ``_SCREAMING_SNAKE_CASE`` (duplicate arguments, a SyntaxError) and
    its body assigns to a local instead of ``self.<attr>``. Restore upstream
    names before executing.
    """
    def __init__( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE=100 ,_SCREAMING_SNAKE_CASE=13 ,_SCREAMING_SNAKE_CASE=30 ,_SCREAMING_SNAKE_CASE=2 ,_SCREAMING_SNAKE_CASE=3 ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=True ,_SCREAMING_SNAKE_CASE=32 ,_SCREAMING_SNAKE_CASE=5 ,_SCREAMING_SNAKE_CASE=4 ,_SCREAMING_SNAKE_CASE=37 ,_SCREAMING_SNAKE_CASE="gelu" ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=0.1 ,_SCREAMING_SNAKE_CASE=10 ,_SCREAMING_SNAKE_CASE=0.02 ,_SCREAMING_SNAKE_CASE=3 ,) -> Dict:
        # Store the hyper-parameters; RHS names are the upstream parameter
        # names the mangler left behind.
        UpperCAmelCase_ : Union[str, Any] = parent
        UpperCAmelCase_ : Tuple = vocab_size
        UpperCAmelCase_ : List[str] = batch_size
        UpperCAmelCase_ : Any = image_size
        UpperCAmelCase_ : Union[str, Any] = patch_size
        UpperCAmelCase_ : Union[str, Any] = num_channels
        UpperCAmelCase_ : Any = is_training
        UpperCAmelCase_ : Union[str, Any] = use_labels
        UpperCAmelCase_ : Union[str, Any] = hidden_size
        UpperCAmelCase_ : Union[str, Any] = num_hidden_layers
        UpperCAmelCase_ : int = num_attention_heads
        UpperCAmelCase_ : Union[str, Any] = intermediate_size
        UpperCAmelCase_ : Any = hidden_act
        UpperCAmelCase_ : Optional[int] = hidden_dropout_prob
        UpperCAmelCase_ : List[Any] = attention_probs_dropout_prob
        UpperCAmelCase_ : List[str] = type_sequence_label_size
        UpperCAmelCase_ : Union[str, Any] = initializer_range
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        UpperCAmelCase_ : Dict = (image_size // patch_size) ** 2
        UpperCAmelCase_ : List[str] = num_patches + 1
    # Upstream name: prepare_config_and_inputs — random pixel values, optional
    # labels, and a small BeitConfig.
    def a__ ( self ) -> str:
        UpperCAmelCase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        UpperCAmelCase_ : Tuple = None
        if self.use_labels:
            UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        UpperCAmelCase_ : Optional[int] = BeitConfig(
            vocab_size=self.vocab_size ,image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_SCREAMING_SNAKE_CASE ,initializer_range=self.initializer_range ,)
        return config, pixel_values, labels
    # Upstream name: create_and_check_model — checks base model output shape.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
        UpperCAmelCase_ : Union[str, Any] = FlaxBeitModel(config=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
    # Upstream name: create_and_check_for_masked_lm — masked image modeling
    # head; logits drop the [CLS] position (seq_length - 1).
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> Tuple:
        UpperCAmelCase_ : Union[str, Any] = FlaxBeitForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Any = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length - 1, self.vocab_size) )
    # Upstream name: create_and_check_for_image_classification — also probes
    # the greyscale (1-channel) path.
    def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) -> str:
        UpperCAmelCase_ : Dict = self.type_sequence_label_size
        UpperCAmelCase_ : int = FlaxBeitForImageClassification(config=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        UpperCAmelCase_ : Any = 1
        UpperCAmelCase_ : List[Any] = FlaxBeitForImageClassification(_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
    # Upstream name: prepare_config_and_inputs_for_common.
    def a__ ( self ) -> Optional[Any]:
        UpperCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
        (
            (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ), (
                UpperCAmelCase_
            ),
        ) : List[str] = config_and_inputs
        UpperCAmelCase_ : int = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_flax
class __a( _a , unittest.TestCase ):
    """Common Flax model tests for BeiT (base model, masked image modeling,
    image classification), including JIT-enabled vs JIT-disabled parity.

    NOTE(review): identifiers were auto-mangled — all test methods are named
    ``a__`` (only the last survives under unittest) and several tuple
    unpackings collapse distinct targets (e.g. ``config, inputs_dict``) into
    one repeated name. Restore upstream names before executing.
    """
    lowerCAmelCase = (
        (FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
    )
    # Upstream name: setUp.
    def a__ ( self ) -> None:
        UpperCAmelCase_ : List[Any] = FlaxBeitModelTester(self )
        UpperCAmelCase_ : List[str] = ConfigTester(self ,config_class=_SCREAMING_SNAKE_CASE ,has_text_modality=_SCREAMING_SNAKE_CASE ,hidden_size=37 )
    # Upstream name: test_config.
    def a__ ( self ) -> Optional[int]:
        self.config_tester.run_common_tests()
    # Upstream name: test_forward_signature — the first positional argument of
    # every model's __call__ must be ``pixel_values``.
    def a__ ( self ) -> List[Any]:
        UpperCAmelCase_, UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
            UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.__call__ )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCAmelCase_ : Optional[Any] = [*signature.parameters.keys()]
            UpperCAmelCase_ : Optional[int] = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] ,_SCREAMING_SNAKE_CASE )
    # Upstream name: test_jit_compilation — jitted and non-jitted forward
    # passes must produce identically-shaped outputs.
    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_, UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                UpperCAmelCase_ : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
                UpperCAmelCase_ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
                @jax.jit
                def model_jitted(_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE ):
                    return model(pixel_values=_SCREAMING_SNAKE_CASE ,**_SCREAMING_SNAKE_CASE )
                with self.subTest('''JIT Enabled''' ):
                    UpperCAmelCase_ : Dict = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
                with self.subTest('''JIT Disabled''' ):
                    with jax.disable_jit():
                        UpperCAmelCase_ : List[str] = model_jitted(**_SCREAMING_SNAKE_CASE ).to_tuple()
                self.assertEqual(len(_SCREAMING_SNAKE_CASE ) ,len(_SCREAMING_SNAKE_CASE ) )
                for jitted_output, output in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
                    self.assertEqual(jitted_output.shape ,output.shape )
    # Upstream name: test_model.
    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_for_masked_lm.
    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_for_image_classification.
    def a__ ( self ) -> List[str]:
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
    # Upstream name: test_model_from_pretrained — smoke-loads a checkpoint and
    # runs a dummy forward pass.
    @slow
    def a__ ( self ) -> List[Any]:
        for model_class_name in self.all_model_classes:
            UpperCAmelCase_ : List[Any] = model_class_name.from_pretrained('''microsoft/beit-base-patch16-224''' )
            UpperCAmelCase_ : Optional[int] = model(np.ones((1, 3, 224, 224) ) )
            self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def prepare_img():
    """Load the standard COCO test-fixture image used by the integration tests.

    BUG FIX: the original bound the opened image to a mangled throwaway name
    and then returned the undefined name ``image`` (NameError); it was also
    defined under a mangled name while the tests below call ``prepare_img()``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_vision
@require_flax
class __a( unittest.TestCase ):
    """Flax BeiT integration tests: run pretrained checkpoints on the COCO
    fixture image and compare logits against recorded reference values.

    NOTE(review): identifiers were auto-mangled — all test methods are named
    ``a__`` (only the last survives under unittest) and several literal
    arguments were collapsed to ``_SCREAMING_SNAKE_CASE`` (e.g. the
    ``np.ones(... ,dtype=_SCREAMING_SNAKE_CASE )`` below was presumably
    ``dtype=bool`` upstream — confirm before restoring).
    """
    # Upstream name: default_image_processor.
    @cached_property
    def a__ ( self ) -> Dict:
        return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
    # Upstream name: test_inference_masked_image_modeling_head.
    @slow
    def a__ ( self ) -> Optional[int]:
        UpperCAmelCase_ : Tuple = FlaxBeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' )
        UpperCAmelCase_ : List[Any] = self.default_image_processor
        UpperCAmelCase_ : Optional[Any] = prepare_img()
        UpperCAmelCase_ : Optional[Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).pixel_values
        # prepare bool_masked_pos
        UpperCAmelCase_ : Union[str, Any] = np.ones((1, 196) ,dtype=_SCREAMING_SNAKE_CASE )
        # forward pass
        UpperCAmelCase_ : Optional[int] = model(pixel_values=_SCREAMING_SNAKE_CASE ,bool_masked_pos=_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = outputs.logits
        # verify the logits
        UpperCAmelCase_ : List[str] = (1, 196, 8_192)
        self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : List[str] = np.array(
            [[-3.24_37, 0.50_72, -13.91_74], [-3.24_56, 0.49_48, -13.94_01], [-3.20_33, 0.51_21, -13.85_50]] )
        self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-2 ) )
    # Upstream name: test_inference_image_classification_head_imagenet_1k.
    @slow
    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_ : Any = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' )
        UpperCAmelCase_ : Any = self.default_image_processor
        UpperCAmelCase_ : Any = prepare_img()
        UpperCAmelCase_ : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
        # forward pass
        UpperCAmelCase_ : Optional[Any] = model(**_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Optional[Any] = outputs.logits
        # verify the logits
        UpperCAmelCase_ : Dict = (1, 1_000)
        self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Dict = np.array([-1.23_85, -1.09_87, -1.01_08] )
        self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
        # Expected ImageNet-1k class id (tabby cat) for the fixture image.
        UpperCAmelCase_ : Dict = 281
        self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE )
    # Upstream name: test_inference_image_classification_head_imagenet_22k.
    @slow
    def a__ ( self ) -> Union[str, Any]:
        UpperCAmelCase_ : str = FlaxBeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' )
        UpperCAmelCase_ : Tuple = self.default_image_processor
        UpperCAmelCase_ : Any = prepare_img()
        UpperCAmelCase_ : Dict = image_processor(images=_SCREAMING_SNAKE_CASE ,return_tensors='''np''' )
        # forward pass
        UpperCAmelCase_ : Dict = model(**_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Optional[Any] = outputs.logits
        # verify the logits
        UpperCAmelCase_ : Union[str, Any] = (1, 21_841)
        self.assertEqual(logits.shape ,_SCREAMING_SNAKE_CASE )
        UpperCAmelCase_ : Dict = np.array([1.68_81, -0.27_87, 0.59_01] )
        self.assertTrue(np.allclose(logits[0, :3] ,_SCREAMING_SNAKE_CASE ,atol=1e-4 ) )
        # Expected ImageNet-22k class id for the fixture image.
        UpperCAmelCase_ : Dict = 2_396
        self.assertEqual(logits.argmax(-1 ).item() ,_SCREAMING_SNAKE_CASE ) | 235 | 1 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of {1, ..., n} in lexicographic order.

    BUG FIX: the original declared both parameters with the same mangled name
    ``A__`` (a SyntaxError: duplicate argument) and was itself defined under a
    mangled name while the ``__main__`` block calls ``generate_all_combinations``.

    >>> generate_all_combinations(4, 2)
    [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]
    """
    from itertools import combinations

    # itertools.combinations emits tuples in lexicographic order, matching the
    # original backtracking implementation exactly (including k=0 -> [[]] and
    # k > n -> []).
    return [list(combo) for combo in combinations(range(1, n + 1), k)]


# Backward-compatible alias for the auto-mangled name this file used.
lowerCamelCase__ = generate_all_combinations
def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    """Backtracking helper: extend ``current_list`` with ``level`` more values
    drawn from ``increment..total_number`` and append every completed
    combination to ``total_list`` (mutated in place).

    BUG FIX: the original declared all five parameters with the same mangled
    name ``A__`` (a SyntaxError: duplicate argument), while its own recursive
    call already used the upstream name ``create_all_state`` — restored here.
    """
    if level == 0:
        # A full combination has been built; store a copy, not the live list.
        total_list.append(current_list[:])
        return
    # Upper bound leaves enough room for the remaining ``level - 1`` picks.
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    """Print each combination on its own line, space-separated.

    BUG FIX: the original iterated the undefined name ``total_list`` while its
    parameter was mangled to ``A__``, and printed the *whole* argument on each
    pass instead of the current combination; it was also defined under a
    mangled name while the ``__main__`` block calls ``print_all_state``.
    """
    for combination in total_list:
        print(*combination)


# Backward-compatible alias for the auto-mangled name this file used.
lowerCamelCase__ = print_all_state
if __name__ == "__main__":
    # Demo: print all 2-element combinations of {1, ..., 4}.
    # NOTE(review): the three bindings below were all mangled to
    # ``UpperCAmelCase_`` — upstream they are ``n``, ``k`` and ``total_list``,
    # which is what the two calls below still reference.
    UpperCAmelCase_ = 4
    UpperCAmelCase_ = 2
    UpperCAmelCase_ = generate_all_combinations(n, k)
    print_all_state(total_list)
| 12 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of ``num!`` (Project Euler 20).

    BUG FIX: the original summed ``int(<the input>)`` once per digit instead
    of converting each digit — e.g. it returned 70 for num=10 instead of 27.
    Also restored the name the ``__main__`` block below calls (``solution``).

    >>> solution(10)
    27
    >>> solution(100)
    648
    """
    return sum(int(digit) for digit in str(factorial(num)))


# Backward-compatible alias for the auto-mangled name this file used.
A_ = solution
if __name__ == "__main__":
    # Read an integer from stdin and print the digit sum of its factorial.
    # NOTE(review): ``solution`` is the upstream name of the function mangled
    # to ``A_`` above.
    print(solution(int(input("Enter the Number: ").strip())))
| 328 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Yield the primes 2, 3, 5, ... using an incremental (unbounded) sieve.

    ``factor_map`` maps each known composite to one of its prime factors; when
    the running counter hits a composite, that factor is slid forward to the
    composite's next unclaimed multiple.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # ``prime`` is composite: move its factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # ``prime`` really is prime: schedule its square and emit it.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Project Euler 123: least odd n with 2 * p_n * n > ``limit``.

    For odd n the remainder of (p_n - 1)^n + (p_n + 1)^n mod p_n^2 equals
    2 * n * p_n, so only odd n need checking.

    BUG FIX: in the mangled original both functions were named
    ``__UpperCamelCase`` (the first was silently shadowed), while the body
    called ``sieve()`` and the ``__main__`` block called ``solution()`` —
    both NameErrors. The upstream names are restored here.
    """
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Skip the next prime: even n always leaves a remainder of 2.
        next(primes)
        n += 2


# Backward-compatible alias: in the mangled original the *second* definition
# of ``__UpperCamelCase`` (the solver) was the surviving module-level binding.
__UpperCamelCase = solution


if __name__ == "__main__":
    print(solution())
| 353 | '''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('''dataset_size''' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('''input_in_memory_max_size''' , ['''default''', 0, 100 * 2**20, 900 * 2**20] )
def __UpperCamelCase ( monkeypatch , dataset_size , input_in_memory_max_size ):
    """Check ``is_small_dataset`` against the configured IN_MEMORY_MAX_SIZE.

    BUG FIX: the original declared all three parameters with the same mangled
    name ``UpperCAmelCase`` (a SyntaxError: duplicate argument) while the body
    and the parametrize decorators reference ``monkeypatch``, ``dataset_size``
    and ``input_in_memory_max_size`` — restored here.
    """
    # "default" means: do not patch, exercise the library default (0 = off).
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config , '''IN_MEMORY_MAX_SIZE''' , input_in_memory_max_size )
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    # A dataset is "small" only when both sizes are known/non-zero and the
    # dataset fits under the cap.
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size )
    assert result == expected
| 214 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
# Lazy-import structure for XLM: heavy submodules are only imported on first
# attribute access via _LazyModule.
#
# BUG FIX: every assignment here had been collapsed to the single name
# ``__snake_case`` (each overwriting the last), so the ``_import_structure``
# passed to _LazyModule at the bottom was undefined -> NameError at import
# time; the lazy proxy was also never installed into ``sys.modules``.
_import_structure = {
    """configuration_xlm""": ["""XLM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMConfig""", """XLMOnnxConfig"""],
    """tokenization_xlm""": ["""XLMTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch present: expose the PyTorch modeling classes.
    _import_structure["""modeling_xlm"""] = [
        """XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """XLMForMultipleChoice""",
        """XLMForQuestionAnswering""",
        """XLMForQuestionAnsweringSimple""",
        """XLMForSequenceClassification""",
        """XLMForTokenClassification""",
        """XLMModel""",
        """XLMPreTrainedModel""",
        """XLMWithLMHeadModel""",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow present: expose the TF modeling classes.
    _import_structure["""modeling_tf_xlm"""] = [
        """TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """TFXLMForMultipleChoice""",
        """TFXLMForQuestionAnsweringSimple""",
        """TFXLMForSequenceClassification""",
        """TFXLMForTokenClassification""",
        """TFXLMMainLayer""",
        """TFXLMModel""",
        """TFXLMPreTrainedModel""",
        """TFXLMWithLMHeadModel""",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
    from .tokenization_xlm import XLMTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm import (
            XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMForMultipleChoice,
            XLMForQuestionAnswering,
            XLMForQuestionAnsweringSimple,
            XLMForSequenceClassification,
            XLMForTokenClassification,
            XLMModel,
            XLMPreTrainedModel,
            XLMWithLMHeadModel,
        )
    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm import (
            TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMForMultipleChoice,
            TFXLMForQuestionAnsweringSimple,
            TFXLMForSequenceClassification,
            TFXLMForTokenClassification,
            TFXLMMainLayer,
            TFXLMModel,
            TFXLMPreTrainedModel,
            TFXLMWithLMHeadModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 4 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Schedule that polynomially warms the LR from 0 up to
    ``initial_learning_rate`` over ``warmup_steps``, then delegates to
    ``decay_schedule_fn`` for the remaining steps.

    BUG FIX: the mangled original declared all five ``__init__`` parameters as
    ``lowerCamelCase__`` (a SyntaxError: duplicate argument), cast with the
    non-existent dtype ``tf.floataa`` (upstream: ``tf.float32``), named the
    serialization hook ``_lowerCAmelCase`` (Keras requires ``get_config``) and
    was itself defined under a mangled name while ``create_optimizer`` below
    instantiates ``WarmUp``. Upstream names restored.
    """

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or """WarmUp""" ) as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        """Return the config dict Keras uses to (de)serialize this schedule."""
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


# Backward-compatible alias for the auto-mangled name this file used.
SCREAMING_SNAKE_CASE__ = WarmUp
def create_optimizer(
    init_lr,
    num_train_steps,
    num_warmup_steps,
    min_lr_ratio=0.0,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    adam_clipnorm=None,
    adam_global_clipnorm=None,
    weight_decay_rate=0.0,
    power=1.0,
    include_in_weight_decay=None,
):
    """
    Creates an optimizer with a learning-rate schedule: linear warmup for
    `num_warmup_steps` steps followed by polynomial decay down to
    `init_lr * min_lr_ratio` over the remaining steps.

    Returns:
        `(optimizer, lr_schedule)` — `AdamWeightDecay` when `weight_decay_rate > 0`,
        plain Adam otherwise, plus the schedule feeding it.
    """
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            # LayerNorm weights and biases are conventionally excluded from decay.
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """
    Adam with decoupled weight decay (https://arxiv.org/abs/1711.05101).

    Unlike L2 regularization added to the loss, the decay is applied directly
    to the parameters just before the Adam update, which interacts correctly
    with the adaptive learning rate.

    Args:
        weight_decay_rate: Decay rate applied to matching parameters.
        include_in_weight_decay: Regexes of parameter names to always decay.
        exclude_from_weight_decay: Regexes of parameter names to never decay
            (only consulted when `include_in_weight_decay` does not match).
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        # Stash the decay rate as a constant so _decay_weights_op can read it.
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        # Decay first, then let Adam apply the gradient update.
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator(object):
    """
    Gradient accumulation utility.

    Call the accumulator with a list of gradients (one entry per trainable
    variable, `None` allowed) once per micro-batch; read `.gradients` to get
    the running sums and `.step` for the number of accumulated calls, then
    `.reset()` after applying them.
    """

    def __init__(self):
        """Initializes the accumulator."""
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps (creates the counter lazily)."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
| 116 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # sentencepiece is an optional dependency; without it there is no slow
    # tokenizer class to fall back to, so the sentinel stays None.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

# On-disk file names that make up a saved tokenizer.
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Canonical download locations for each pretrained checkpoint.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum input length each checkpoint was trained with.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" BigBird tokenizer backed by HuggingFace's *tokenizers*
    library, based on a SentencePiece (Unigram) model.

    Sequences are formatted as ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # Saving the slow (SentencePiece) vocabulary requires the original file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Adds [CLS]/[SEP] around one sequence or a pair of sequences."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Returns a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.'''
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for `[CLS] A [SEP]`, 1 for the optional `B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copies the SentencePiece model file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.'''
            )

        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 18 | '''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    """Builds a tiny hybrid-DPT config plus matching random inputs for the tests below."""

    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # Tiny BiT backbone config for the hybrid embedding layer.
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as DPT
    does not use input_ids, inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        for model_class in self.all_model_classes:
            # Depth estimation needs targets the common helper cannot build.
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
A_ = DPTForDepthEstimation(_SCREAMING_SNAKE_CASE )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test."""
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image


# Backward-compatible alias for the previous (generated) name.
_UpperCAmelCase = prepare_img
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        """End-to-end check of depth prediction against known reference values."""
        image_processor = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''')
        model = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''').to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='''pt''').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6_437, 5.6_146, 5.6_511], [5.4_371, 5.5_649, 5.5_958], [5.5_215, 5.5_184, 5.5_293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
| 18 | 1 |
'''simple docstring'''
from math import sqrt
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int = 1000000 ) -> int:
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(SCREAMING_SNAKE_CASE__, sum_shortest_sides // 2 )
- max(1, sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
| 125 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
# Must be named `logger`: the pipeline below calls `logger.warning(...)`.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    r"""
    Pipeline for text-based inpainting using Stable Diffusion.

    A CLIPSeg model turns the free-form `text` prompt into a segmentation mask
    over the input image; that mask is then fed to the standard Stable
    Diffusion inpainting pipeline together with `prompt`.
    """

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, '''steps_offset''') and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
                f""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
                '''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate('''steps_offset!=1''', '''1.0.0''', deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config['''steps_offset'''] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, '''skip_prk_steps''') and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''', '''1.0.0''', deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config['''skip_prk_steps'''] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''')

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """Compute attention in `slice_size` chunks to lower peak memory."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """Restore one-step attention computation."""
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """Offload submodules to CPU, moving them to GPU only while used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        device = torch.device('''cuda''')

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the model weights execute on (accounts for accelerate hooks)."""
        if self.device != torch.device('''meta''') or not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 5_12,
        width: int = 5_12,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """Generate inpainted images; `text` selects the region to repaint."""
        # We use CLIPSeg on the `text` prompt to predict the region to inpaint.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding='''max_length''', return_tensors='''pt''').to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
| 125 | 1 |
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP image embedder used in
    stable unCLIP, and scales / un-scales image embeddings around them (e.g.
    before and after noise augmentation).
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 7_6_8,
    ):
        super().__init__()
        # Learned statistics; registered as parameters so they move with the model.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize embeddings: subtract the mean, divide by the std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert `scale`: multiply by the std, add back the mean."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 55 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for generation-time constraints.

    Subclasses implement ``advance``/``does_advance``/``update``/``reset``/
    ``remaining``/``copy``; ``__init__`` runs a self-consistency check.

    NOTE(review): the class name is grounded by the ``List[Constraint]`` type
    hint used later in this file; method names are grounded by the call sites
    in ``test()`` below (the original collapsed all six to one mangled name).
    """

    def __init__(self):
        # Sanity-check the subclass contract at construction time.
        self.test()

    def test(self):
        """Run the constraint to completion once to verify it is well-formed."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )
            stepped, completed, reset = self.update(advance)
            counter += 1
            # Guard against constraints that never complete.
            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")
        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Return the token(s) that would advance this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Return whether ``token_id`` makes progress on this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Discard all progress made so far."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Return how many more steps are needed to complete the constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Return a copy; ``stateful=True`` also copies the current progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    """Constraint forcing an exact token sequence to appear in the output.

    NOTE(review): the class name is grounded by the self-reference in
    ``copy()``; attribute names (``token_ids``/``seqlen``/``fulfilled_idx``/
    ``completed``) are grounded by their read sites in the methods below —
    the original code bound them to a throwaway local.
    """

    def __init__(self, token_ids: List[int]):
        # Deliberately skip Constraint.__init__ (and its self-test).
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        """Return the next required token, or ``None`` when already complete."""
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    """Prefix trie over several candidate token sequences.

    Used by ``DisjunctiveConstraint`` (which grounds this class name) to ask,
    given the tokens generated so far, which tokens may legally come next.
    """

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        # Longest candidate determines the constraint's nominal length.
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        # A candidate that is a strict prefix of another would make leaf
        # detection ambiguous, so it is rejected by default.
        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """Return the tokens that can follow ``current_seq`` in the trie."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        """True when ``current_seq`` spells out a complete candidate."""
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        """Number of complete candidates stored under ``root``."""
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """True when one candidate is a prefix of another (leaf count drops)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled by completing any one of several token sequences.

    NOTE(review): class/attribute names grounded by the self-reference in
    ``copy()`` and by the read sites below (``self.trie``, ``self.token_ids``,
    ``self.seqlen``, ``self.current_seq``, ``self.completed``); the original
    bound every assignment to a throwaway local.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        # Deliberately skip Constraint.__init__ (and its self-test).
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        """Return the list of tokens that would advance, or ``None`` if done."""
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        """Consume ``token_id``; return ``(stepped, completed, reset)``."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    """Tracks a beam's progress through a list of constraints.

    At any time at most one constraint is "in progress"; the rest are either
    completed or pending. NOTE(review): attribute/method names grounded by
    the read sites below and by the self-reference in ``copy()``.
    """

    def __init__(self, constraints: List["Constraint"]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        """Reset to the pristine state: nothing complete, nothing in progress."""
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        """Score used to group beams: completed constraints dominate, with
        extra credit for partial progress on the in-progress constraint."""
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """Return the token(s) that would make progress, or ``None``."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Rebuild the state as if ``token_ids`` had been generated from scratch."""
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        """Feed one token; return ``(complete, stepped)`` for that token."""
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
| 55 | 1 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the shared SentencePiece test fixture used by the tokenizer tests.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for XLNet (slow and fast tokenizers).

    NOTE(review): class attributes, parameter values and method names were
    reconstructed — the original collapsed all four class attributes to one
    duplicated name (only the last binding survived) and mangled every method
    name and literal argument. The fixture path is computed inline so this
    class does not depend on a module-level constant.
    """

    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Round-trip a token through the vocab mapping."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])

        # Out-of-vocabulary pieces ("9", "é") come back as "<unk>".
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # Lowercasing: "I" -> "i", "falsé" -> "false" (accent stripped).
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "",
                "i",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["▁he", "ll", "o"])

    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(get_tests_dir("fixtures/test_sentencepiece.model"), do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # Case preserved ("I"), accent still normalized away ("se").
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "se",
                ".",
            ],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        # XLNet appends <sep> (4) and <cls> (3) at the end of each segment.
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_a + [4, 3]

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[17, 2_14_42, 2_70, 17, 10, 1_46_45, 3_18, 34, 17, 45_46, 31_45, 7_87, 13, 77_52, 2_20_18, 23, 21, 17, 45_46, 31_45, 7_87, 13, 33_52, 1_44_31, 13, 55_00, 11, 11_76, 5_80, 13, 1_68_19, 47_97, 23, 17, 10, 1_71_35, 6_58, 19, 4_57, 79_32, 13, 1_84, 19, 31_54, 1_71_35, 64_68, 19, 14_04, 1_22_69, 19, 42_29, 53_56, 1_62_64, 46, 19, 17, 2_05_45, 1_03_95, 9, 9, 9, 11, 28, 64_21, 95_31, 2_07_29, 17, 10, 3_53, 1_70_22, 11, 21, 64_21, 95_31, 1_69_49, 17, 10, 1_15_09, 7_53, 11, 33, 95, 24_21, 73_85, 9_56, 1_44_31, 26_26, 25, 8_42, 73_85, 48_36, 21, 14_29, 22_72, 98_55, 31_20, 1_61, 2_47_38, 19, 1_32_03, 6_58, 2_18, 7_87, 21, 4_30, 1_84_82, 8_47, 26_37, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_22, 2_21_78, 27, 10_64, 22, 9_56, 13, 1_11_01, 14_29, 58_54, 2_43_13, 1_89_53, 40, 4_22, 2_43_66, 68, 17_58, 37, 1_04_83, 1_42_57, 31, 2_07, 2_63, 21, 2_03, 37_73, 25, 71, 97_35, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 20_49, 34_42, 17, 1_38_94, 33_80, 23, 95, 18, 1_76_34, 22_88, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
| 237 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def UpperCamelCase(matrix: list[list[float]]) -> list[list[float]]:
    """Return the multiplicative inverse of a 2x2 or 3x3 matrix.

    Uses ``Decimal`` internally to reduce floating-point error while computing
    determinants and cofactors, and returns plain ``float`` entries.

    Raises:
        ValueError: if the matrix is singular or not 2x2/3x3.

    NOTE(review): the function name is kept from the original; the parameter
    and local names (``matrix``, ``determinant``, ``swapped_matrix``,
    ``cofactor_matrix``, ``adjoint_matrix``, ``inverse_matrix``) were restored
    from their read sites — the original bound them to throwaway locals, and
    divided by the matrix instead of the determinant in the final loop.
    """
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 237 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
# Lazy-import machinery for the Perceiver model package.
# NOTE(review): names restored — the original bound three different values to
# `_SCREAMING_SNAKE_CASE`, yet referenced `_import_structure` at the bottom,
# and dropped the `sys.modules[__name__] = ...` target that `import sys`
# clearly exists for.
_import_structure = {
    "configuration_perceiver": ["PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP", "PerceiverConfig", "PerceiverOnnxConfig"],
    "tokenization_perceiver": ["PerceiverTokenizer"],
}

# Vision-dependent processors are only exposed when vision extras are present.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]

# Modeling code requires torch.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_perceiver"] = [
        "PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PerceiverForImageClassificationConvProcessing",
        "PerceiverForImageClassificationFourier",
        "PerceiverForImageClassificationLearned",
        "PerceiverForMaskedLM",
        "PerceiverForMultimodalAutoencoding",
        "PerceiverForOpticalFlow",
        "PerceiverForSequenceClassification",
        "PerceiverLayer",
        "PerceiverModel",
        "PerceiverPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; at runtime the module is lazy.
    from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
    from .tokenization_perceiver import PerceiverTokenizer

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_perceiver import PerceiverFeatureExtractor
        from .image_processing_perceiver import PerceiverImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_perceiver import (
            PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PerceiverForImageClassificationConvProcessing,
            PerceiverForImageClassificationFourier,
            PerceiverForImageClassificationLearned,
            PerceiverForMaskedLM,
            PerceiverForMultimodalAutoencoding,
            PerceiverForOpticalFlow,
            PerceiverForSequenceClassification,
            PerceiverLayer,
            PerceiverModel,
            PerceiverPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their hosted config files.
# NOTE(review): the original bound both values to the same name, so the
# logger was immediately shadowed by the dict; names restored per the
# transformers configuration-module convention.
MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class A__(PretrainedConfig):
    """Configuration for the MGP-STR scene-text-recognition model.

    NOTE(review): the 19 constructor parameters were all named ``__snake_case``
    in the original (a SyntaxError — duplicate argument names); names were
    restored from the right-hand sides of the body's ``self.X = X``
    assignments, defaults kept in the original order. The ``model_type`` class
    attribute is required by ``PretrainedConfig`` — confirm the original
    ``__magic_name__`` binding against upstream.
    """

    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_aa_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_aa_attentions = output_aa_attentions
        self.initializer_range = initializer_range
| 213 | 0 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    """Builds SwiftFormer configs and inputs for the model test suite.

    NOTE(review): the class name is grounded by the ``SwiftFormerModelTester(self)``
    call later in this file; parameter names were restored from the body's
    ``self.X = X`` right-hand sides (the original duplicated one mangled name
    for every parameter, a SyntaxError), method names from their call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random inputs."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model produces the expected hidden-state shape."""
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head with and without labels."""
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        ((config, pixel_values, labels)) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # Common-suite tests for SwiftFormer (model + image-classification head).
    # NOTE(review): mechanical renaming collapsed distinct identifiers here —
    # both mixin bases became `__SCREAMING_SNAKE_CASE` (a duplicate base raises
    # TypeError at class creation), every class attribute became `A__` (later
    # assignments overwrite earlier ones; presumably all_model_classes,
    # pipeline_model_mapping and test_* flags upstream), and every method is
    # named `lowerCamelCase_` (only the last survives). Restore upstream names
    # before running — confirm against the reference test file.
    A__ : Optional[int] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    A__ : List[str] = (
        {"""feature-extraction""": SwiftFormerModel, """image-classification""": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )
    A__ : Any = False
    A__ : Union[str, Any] = False
    A__ : Union[str, Any] = False
    A__ : Tuple = False
    A__ : List[Any] = False
    def lowerCamelCase_ ( self ):
        """Set up the shared model tester and the config tester."""
        UpperCamelCase_ = SwiftFormerModelTester(self )
        # NOTE(review): `__UpperCamelCase` below is undefined at class scope.
        UpperCamelCase_ = ConfigTester(
            self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
    def lowerCamelCase_ ( self ):
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
    def lowerCamelCase_ ( self ):
        """Skipped: the model consumes pixel values, not input embeddings."""
        pass
    def lowerCamelCase_ ( self ):
        """Output embeddings, when present, must be a plain nn.Linear head."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(__UpperCamelCase )
            UpperCamelCase_ = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
    def lowerCamelCase_ ( self ):
        """forward() must take `pixel_values` as its first argument."""
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(__UpperCamelCase )
            UpperCamelCase_ = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            UpperCamelCase_ = [*signature.parameters.keys()]
            UpperCamelCase_ = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , __UpperCamelCase )
    def lowerCamelCase_ ( self ):
        """Exercise the base-model shape check from the tester."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )
    def lowerCamelCase_ ( self ):
        """Exercise the image-classification shape check from the tester."""
        UpperCamelCase_ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
    @slow
    def lowerCamelCase_ ( self ):
        """Smoke-load the first published checkpoint."""
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_ = SwiftFormerModel.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
    @unittest.skip(reason="""SwiftFormer does not output attentions""" )
    def lowerCamelCase_ ( self ):
        """Skipped: no attention maps are returned by this architecture."""
        pass
    def lowerCamelCase_ ( self ):
        """Hidden states: 8 feature maps whose spatial size halves every two
        blocks (see shape check below)."""
        # NOTE(review): the nested function declares three parameters all
        # named `__UpperCamelCase` — a SyntaxError in the original.
        def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
            UpperCamelCase_ = model_class(__UpperCamelCase )
            model.to(__UpperCamelCase )
            model.eval()
            with torch.no_grad():
                UpperCamelCase_ = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
            UpperCamelCase_ = outputs.hidden_states
            UpperCamelCase_ = 8
            self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase ) # TODO
            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(__UpperCamelCase ) ):
                self.assertEqual(
                    hidden_states[i].shape , torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ] ) , )
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            UpperCamelCase_ = True
            check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            UpperCamelCase_ = True
            check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
    def lowerCamelCase_ ( self ):
        """With init ranges zeroed, every trainable parameter's mean must be
        exactly 0.0 or 1.0."""
        def _config_zero_init(__UpperCamelCase ):
            UpperCamelCase_ = copy.deepcopy(__UpperCamelCase )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(__UpperCamelCase , __UpperCamelCase , 1e-10 )
                if isinstance(getattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase ):
                    UpperCamelCase_ = _config_zero_init(getattr(__UpperCamelCase , __UpperCamelCase ) )
                    setattr(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
            return configs_no_init
        UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase_ = _config_zero_init(__UpperCamelCase )
        for model_class in self.all_model_classes:
            UpperCamelCase_ = model_class(config=__UpperCamelCase )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def lowerCamelCase_ ( self ):
        """Skipped upstream pending a smaller common-test model."""
        pass
def lowerCamelCase__ ( ) -> Union[str, Any]:
    """Load the standard COCO fixture image used by the integration test.

    NOTE(review): the original bound the opened image to a throwaway local
    and then returned the undefined name ``image``; local restored.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowercase_ ( unittest.TestCase ):
    """Integration test: pretrained SwiftFormer-XS on the COCO fixture image."""
    @cached_property
    def lowerCamelCase_ ( self ):
        """Image processor for the checkpoint (None when vision deps are absent)."""
        return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
    @slow
    def lowerCamelCase_ ( self ):
        """Forward the fixture image and pin the logits shape and first values.

        NOTE(review): `__UpperCamelCase` (used as the device and in several
        asserts below) is undefined in this obfuscated copy — upstream this
        is `torch_device` / the intermediate locals; confirm before running.
        """
        UpperCamelCase_ = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(__UpperCamelCase )
        UpperCamelCase_ = self.default_image_processor
        UpperCamelCase_ = prepare_img()
        UpperCamelCase_ = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
        # forward pass
        with torch.no_grad():
            UpperCamelCase_ = model(**__UpperCamelCase )
        # verify the logits
        UpperCamelCase_ = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , __UpperCamelCase )
        UpperCamelCase_ = torch.tensor([[-2.17_03e00, 2.11_07e00, -2.08_11e00]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
| 122 |
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase_ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
    # Tokenizer tests for mBARThez (slow + fast tokenizers).
    # NOTE(review): mechanical renaming collapsed identifiers in this class —
    # every class attribute is `A__` (presumably tokenizer_class,
    # rust_tokenizer_class and test flags upstream) and every local is
    # `UpperCamelCase_`, so later lines read names (`tokenizer`, `vocab_keys`,
    # `batch`, ...) that are never bound. Restore before running.
    A__ : Dict = BarthezTokenizer
    A__ : List[Any] = BarthezTokenizerFast
    A__ : int = True
    A__ : str = True
    def lowerCamelCase_ ( self ):
        """Download the fast tokenizer and save it (both formats) to tmpdir."""
        super().setUp()
        UpperCamelCase_ = BarthezTokenizerFast.from_pretrained("""moussaKam/mbarthez""" )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=__UpperCamelCase )
        UpperCamelCase_ = tokenizer
    def lowerCamelCase_ ( self ):
        """`<pad>` must map to id 1 and back."""
        UpperCamelCase_ = """<pad>"""
        UpperCamelCase_ = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) , __UpperCamelCase )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) , __UpperCamelCase )
    def lowerCamelCase_ ( self ):
        """Check special-token positions and total vocab size (101122)."""
        UpperCamelCase_ = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<s>""" )
        self.assertEqual(vocab_keys[1] , """<pad>""" )
        self.assertEqual(vocab_keys[-1] , """<mask>""" )
        self.assertEqual(len(__UpperCamelCase ) , 1_0_1_1_2_2 )
    def lowerCamelCase_ ( self ):
        """vocab_size property must agree with the vocab dict."""
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_1_2_2 )
    @require_torch
    def lowerCamelCase_ ( self ):
        """Batch-encode two sentences to torch tensors and pin ids/shapes."""
        UpperCamelCase_ = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
        UpperCamelCase_ = [0, 5_7, 3_0_1_8, 7_0_3_0_7, 9_1, 2]
        UpperCamelCase_ = self.tokenizer(
            __UpperCamelCase , max_length=len(__UpperCamelCase ) , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""pt""" )
        self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        UpperCamelCase_ = batch.input_ids.tolist()[0]
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
    def lowerCamelCase_ ( self ):
        """Slow and fast tokenizers must tokenize/encode identically."""
        if not self.test_rust_tokenizer:
            return
        UpperCamelCase_ = self.get_tokenizer()
        UpperCamelCase_ = self.get_rust_tokenizer()
        UpperCamelCase_ = """I was born in 92000, and this is falsé."""
        UpperCamelCase_ = tokenizer.tokenize(__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.tokenize(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        UpperCamelCase_ = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
        UpperCamelCase_ = self.get_rust_tokenizer()
        UpperCamelCase_ = tokenizer.encode(__UpperCamelCase )
        UpperCamelCase_ = rust_tokenizer.encode(__UpperCamelCase )
        self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
    @slow
    def lowerCamelCase_ ( self ):
        """Full integration check against a pinned encoding for French text."""
        UpperCamelCase_ = {"""input_ids""": [[0, 4_9_0, 1_4_3_2_8, 4_5_0_7, 3_5_4, 4_7, 4_3_6_6_9, 9_5, 2_5, 7_8_1_1_7, 2_0_2_1_5, 1_9_7_7_9, 1_9_0, 2_2, 4_0_0, 4, 3_5_3_4_3, 8_0_3_1_0, 6_0_3, 8_6, 2_4_9_3_7, 1_0_5, 3_3_4_3_8, 9_4_7_6_2, 1_9_6, 3_9_6_4_2, 7, 1_5, 1_5_9_3_3, 1_7_3, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0_5_3_4, 8_7, 2_5, 6_6, 3_3_5_8, 1_9_6, 5_5_2_8_9, 8, 8_2_9_6_1, 8_1, 2_2_0_4, 7_5_2_0_3, 7, 1_5, 7_6_3, 1_2_9_5_6, 2_1_6, 1_7_8, 1_4_3_2_8, 9_5_9_5, 1_3_7_7, 6_9_6_9_3, 7, 4_4_8, 7_1_0_2_1, 1_9_6, 1_8_1_0_6, 1_4_3_7, 1_3_9_7_4, 1_0_8, 9_0_8_3, 4, 4_9_3_1_5, 7, 3_9, 8_6, 1_3_2_6, 2_7_9_3, 4_6_3_3_3, 4, 4_4_8, 1_9_6, 7_4_5_8_8, 7, 4_9_3_1_5, 7, 3_9, 2_1, 8_2_2, 3_8_4_7_0, 7_4, 2_1, 6_6_7_2_3, 6_2_4_8_0, 8, 2_2_0_5_0, 5, 2]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
        # fmt: on
        # moussaKam/mbarthez is a french model. So we also use french texts.
        UpperCamelCase_ = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]
        self.tokenizer_integration_test_util(
            expected_encoding=__UpperCamelCase , model_name="""moussaKam/mbarthez""" , revision="""c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6""" , sequences=__UpperCamelCase , )
| 122 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase , ToolTesterMixin ):
    """Tests for the text-to-speech agent tool.

    NOTE(review): fixes applied to the obfuscated original — the mixin base
    was the undefined name ``lowerCamelCase__`` (restored to
    ``ToolTesterMixin``, the only otherwise-unused import); ``load_tool``'s
    result was bound to a throwaway local although the next line calls
    ``self.tool.setup()`` (now assigned to ``self.tool``); locals restored
    from their use sites; a fused data-dump fragment (``| 361 |``) on the
    last line was removed. All three methods still share the name
    ``__lowerCamelCase`` (only the last survives) — upstream these are
    ``setup`` and two distinct test methods; restore before relying on them.
    """

    def __lowerCamelCase ( self ) -> int:
        """Instantiate and initialise the tool once."""
        self.tool = load_tool("text-to-speech" )
        self.tool.setup()

    def __lowerCamelCase ( self ) -> str:
        """Seeded run: first waveform samples must match the reference."""
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )

    def __lowerCamelCase ( self ) -> Tuple:
        """Re-seeding must reproduce exactly the same waveform (determinism)."""
        torch.manual_seed(0 )
        result = self.tool("hey" )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
from math import sqrt
def UpperCAmelCase_ (_lowerCAmelCase : int = 1_00_00_00 ) -> int:
    """Project Euler 86: smallest cuboid size M for which the cumulative
    number of integer-shortest-path cuboids (with longest side <= M) first
    exceeds `_lowerCAmelCase`.

    For each candidate M, the shortest surface path of an a x b x M cuboid
    (a <= b <= M, s = a + b) is sqrt(s**2 + M**2); when that is an integer,
    the number of (a, b) splits of s is min(M, s // 2) - max(1, s - M) + 1.

    NOTE(review): in the obfuscated original every local was bound to the
    same name while the loop read `num_cuboids`/`max_cuboid_size` (undefined),
    and the split-count cap used the *limit* instead of `max_cuboid_size`;
    both restored to the intended algorithm.
    """
    num_cuboids = 0
    max_cuboid_size = 0
    while num_cuboids <= _lowerCAmelCase:
        max_cuboid_size += 1
        # s ranges over all possible sums of the two shorter sides.
        for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
                num_cuboids += (
                    min(max_cuboid_size , sum_shortest_sides // 2 )
                    - max(1 , sum_shortest_sides - max_cuboid_size )
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
    # NOTE(review): the original printed `solution()`, a name that does not
    # exist in this file (the function above was renamed `UpperCAmelCase_`),
    # and carried a fused data-dump fragment ("| 171 | 0 |") that broke the
    # syntax; both fixed.
    print(F"""{UpperCAmelCase_() = }""")
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
# Module-level logger (not referenced in the visible tests below).
_a = logging.get_logger(__name__)
# Force deterministic kernels so the hard-coded tensor slices below reproduce.
enable_full_determinism()
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
    """Common-suite tests for UNet2DModel on 3-channel 32x32 inputs.

    NOTE(review): mechanical renaming collapsed identifiers — both mixin
    bases are `snake_case__` (a duplicate base raises TypeError), both class
    attributes are `UpperCamelCase__` (presumably model_class and
    main_input_name upstream), every property/method is `UpperCamelCase`
    (only the last of each survives), and locals are all `_UpperCAmelCase`
    while later lines read `batch_size`/`num_channels`/`sizes`/`noise`/...
    and the device name `UpperCAmelCase` (presumably torch_device, which is
    imported above). Restore before running.
    """
    UpperCamelCase__ = UNetaDModel
    UpperCamelCase__ = "sample"
    @property
    def UpperCamelCase ( self ):
        """Dummy inputs: (4, 3, 32, 32) noise plus timestep tensor [10]."""
        _UpperCAmelCase = 4
        _UpperCAmelCase = 3
        _UpperCAmelCase = (32, 32)
        _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
        return {"sample": noise, "timestep": time_step}
    @property
    def UpperCamelCase ( self ):
        """Input shape expected by the common tests."""
        return (3, 32, 32)
    @property
    def UpperCamelCase ( self ):
        """Output shape expected by the common tests."""
        return (3, 32, 32)
    def UpperCamelCase ( self ):
        """Init kwargs for a tiny attention-equipped UNet + dummy inputs."""
        _UpperCAmelCase = {
            'block_out_channels': (32, 64),
            'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
            'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
            'attention_head_dim': 3,
            'out_channels': 3,
            'in_channels': 3,
            'layers_per_block': 2,
            'sample_size': 32,
        }
        _UpperCAmelCase = self.dummy_input
        return init_dict, inputs_dict
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
    """Tests for the 4-channel (LDM-style) UNet2DModel, incl. pretrained loads.

    NOTE(review): same systematic renaming damage as the class above
    (duplicate `snake_case__` bases, collapsed `UpperCamelCase__` attributes,
    identically named methods, single-name locals with undefined read sites).
    Restore upstream identifiers before running.
    """
    UpperCamelCase__ = UNetaDModel
    UpperCamelCase__ = "sample"
    @property
    def UpperCamelCase ( self ):
        """Dummy inputs: (4, 4, 32, 32) noise plus timestep tensor [10]."""
        _UpperCAmelCase = 4
        _UpperCAmelCase = 4
        _UpperCAmelCase = (32, 32)
        _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor([10] ).to(UpperCAmelCase )
        return {"sample": noise, "timestep": time_step}
    @property
    def UpperCamelCase ( self ):
        """Input shape expected by the common tests."""
        return (4, 32, 32)
    @property
    def UpperCamelCase ( self ):
        """Output shape expected by the common tests."""
        return (4, 32, 32)
    def UpperCamelCase ( self ):
        """Init kwargs for a tiny 4-channel UNet + dummy inputs."""
        _UpperCAmelCase = {
            'sample_size': 32,
            'in_channels': 4,
            'out_channels': 4,
            'layers_per_block': 2,
            'block_out_channels': (32, 64),
            'attention_head_dim': 32,
            'down_block_types': ('DownBlock2D', 'DownBlock2D'),
            'up_block_types': ('UpBlock2D', 'UpBlock2D'),
        }
        _UpperCAmelCase = self.dummy_input
        return init_dict, inputs_dict
    def UpperCamelCase ( self ):
        """from_pretrained must load the dummy LDM UNet with no missing keys."""
        _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
        self.assertIsNotNone(UpperCAmelCase )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(UpperCAmelCase )
        _UpperCAmelCase = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def UpperCamelCase ( self ):
        """Same pretrained forward pass, GPU only."""
        _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
        model.to(UpperCAmelCase )
        _UpperCAmelCase = model(**self.dummy_input ).sample
        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
    def UpperCamelCase ( self ):
        """Accelerate (low_cpu_mem_usage) loading must match normal loading."""
        _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase )
        model_accelerate.to(UpperCAmelCase )
        model_accelerate.eval()
        _UpperCAmelCase = torch.randn(
            1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
        _UpperCAmelCase = noise.to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
        _UpperCAmelCase = model_accelerate(UpperCAmelCase , UpperCAmelCase )['sample']
        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()
        _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained(
            'fusing/unet-ldm-dummy-update' , output_loading_info=UpperCAmelCase , low_cpu_mem_usage=UpperCAmelCase )
        model_normal_load.to(UpperCAmelCase )
        model_normal_load.eval()
        _UpperCAmelCase = model_normal_load(UpperCAmelCase , UpperCAmelCase )['sample']
        assert torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 )
    def UpperCamelCase ( self ):
        """Seeded forward on the dummy checkpoint must match pinned values."""
        _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
        model.eval()
        model.to(UpperCAmelCase )
        _UpperCAmelCase = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        _UpperCAmelCase = noise.to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(UpperCAmelCase )
        with torch.no_grad():
            _UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
        _UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        _UpperCAmelCase = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-3 ) )
class __lowerCamelCase ( snake_case__ , snake_case__ , unittest.TestCase):
    """Tests for the NCSN++ (score-based, Fourier-time-embedding) UNet2DModel.

    NOTE(review): same systematic renaming damage as the classes above
    (duplicate `snake_case__` bases, collapsed attributes/method names,
    single-name locals with undefined read sites, `UpperCAmelCase` standing
    in for the device). Restore upstream identifiers before running.
    """
    UpperCamelCase__ = UNetaDModel
    UpperCamelCase__ = "sample"
    @property
    def UpperCamelCase ( self , UpperCAmelCase=(32, 32) ):
        """Dummy inputs with a configurable spatial size; int64 timesteps."""
        _UpperCAmelCase = 4
        _UpperCAmelCase = 3
        _UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCAmelCase )
        return {"sample": noise, "timestep": time_step}
    @property
    def UpperCamelCase ( self ):
        """Input shape expected by the common tests."""
        return (3, 32, 32)
    @property
    def UpperCamelCase ( self ):
        """Output shape expected by the common tests."""
        return (3, 32, 32)
    def UpperCamelCase ( self ):
        """Init kwargs for a tiny NCSN++-style UNet (skip blocks, fourier
        time embedding, no group norm) + dummy inputs."""
        _UpperCAmelCase = {
            'block_out_channels': [32, 64, 64, 64],
            'in_channels': 3,
            'layers_per_block': 1,
            'out_channels': 3,
            'time_embedding_type': 'fourier',
            'norm_eps': 1e-6,
            'mid_block_scale_factor': math.sqrt(2.0 ),
            'norm_num_groups': None,
            'down_block_types': [
                'SkipDownBlock2D',
                'AttnSkipDownBlock2D',
                'SkipDownBlock2D',
                'SkipDownBlock2D',
            ],
            'up_block_types': [
                'SkipUpBlock2D',
                'SkipUpBlock2D',
                'AttnSkipUpBlock2D',
                'SkipUpBlock2D',
            ],
        }
        _UpperCAmelCase = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def UpperCamelCase ( self ):
        """Pretrained CelebA-HQ 256 checkpoint loads with no missing keys."""
        _UpperCAmelCase , _UpperCAmelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCAmelCase )
        self.assertIsNotNone(UpperCAmelCase )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(UpperCAmelCase )
        _UpperCAmelCase = self.dummy_input
        _UpperCAmelCase = floats_tensor((4, 3) + (256, 256) ).to(UpperCAmelCase )
        _UpperCAmelCase = noise
        _UpperCAmelCase = model(**UpperCAmelCase )
        assert image is not None, "Make sure output is not None"
    @slow
    def UpperCamelCase ( self ):
        """Pinned output slice for the 256x256 CelebA-HQ checkpoint."""
        _UpperCAmelCase = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
        model.to(UpperCAmelCase )
        _UpperCAmelCase = 4
        _UpperCAmelCase = 3
        _UpperCAmelCase = (256, 256)
        _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
        with torch.no_grad():
            _UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
        _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        _UpperCAmelCase = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
    def UpperCamelCase ( self ):
        """Pinned output slice for the dummy FFHQ-VE checkpoint at 32x32."""
        _UpperCAmelCase = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
        model.to(UpperCAmelCase )
        _UpperCAmelCase = 4
        _UpperCAmelCase = 3
        _UpperCAmelCase = (32, 32)
        _UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(UpperCAmelCase )
        _UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(UpperCAmelCase )
        with torch.no_grad():
            _UpperCAmelCase = model(UpperCAmelCase , UpperCAmelCase ).sample
        _UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        _UpperCAmelCase = torch.tensor([-0.03_25, -0.09_00, -0.08_69, -0.03_32, -0.07_25, -0.02_70, -0.01_01, 0.02_27, 0.02_56] )
        # fmt: on
        self.assertTrue(torch_all_close(UpperCAmelCase , UpperCAmelCase , rtol=1e-2 ) )
    def UpperCamelCase ( self ):
        """Intentionally empty (norm_num_groups is None for this model)."""
        pass
| 39 |
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class __lowerCamelCase ( unittest.TestCase):
    """Checks that a Flax pipeline download fetches no PyTorch weight files.

    NOTE(review): locals are collapsed to `_UpperCAmelCase` while later lines
    read `all_root_files` / `files`, and `UpperCAmelCase` (presumably None /
    the tmp dir) is undefined — restore upstream names before running.
    """
    def UpperCamelCase ( self ):
        """Download the tiny SD pipeline and inspect the snapshot contents."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _UpperCAmelCase = FlaxDiffusionPipeline.from_pretrained(
                'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase , cache_dir=UpperCAmelCase )
            _UpperCAmelCase = [t[-1] for t in os.walk(os.path.join(UpperCAmelCase , os.listdir(UpperCAmelCase )[0] , 'snapshots' ) )]
            _UpperCAmelCase = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase):
    """Slow integration tests for FlaxStableDiffusionPipeline on TPU/pmap.

    NOTE(review): systematic renaming damage throughout — every test method
    shares the name `UpperCamelCase` (only the last survives) and all locals
    are `_UpperCAmelCase`, so reads of `prompt_ids`, `images`, `num_samples`,
    etc. and the final `slice_eff` / `slice` comparison reference names that
    are never bound. Restore upstream identifiers before running.
    """
    def UpperCamelCase ( self ):
        """Tiny pipeline, fp32: shape check plus pinned pixel sums (8 devices)."""
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=UpperCAmelCase )
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 4
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        # shard inputs and rng
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_51_47_45 ) < 1e-3
            assert np.abs(np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 4_99_47.8_75 ) < 5e-1
        _UpperCAmelCase = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
        assert len(UpperCAmelCase ) == num_samples
    def UpperCamelCase ( self ):
        """SD v1-4 Flax revision, 50 steps: pinned pixel sums at 512x512."""
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=UpperCAmelCase )
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 50
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        # shard inputs and rng
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.05_65_24_01) ) < 1e-3
            assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_38_38_08.2) ) < 5e-1
    def UpperCamelCase ( self ):
        """SD v1-4 bf16 revision: pinned pixel sums at 512x512."""
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase )
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 50
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        # shard inputs and rng
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
            assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
    def UpperCamelCase ( self ):
        """SD v1-4 bf16 with default safety checker: same pinned sums."""
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 50
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        # shard inputs and rng
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.04_00_39_06) ) < 1e-3
            assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_37_35_16.75) ) < 5e-1
    def UpperCamelCase ( self ):
        """SD v1-4 bf16 with an explicit DDIM scheduler: pinned sums."""
        _UpperCAmelCase = FlaxDDIMScheduler(
            beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=UpperCAmelCase , safety_checker=UpperCAmelCase , )
        _UpperCAmelCase = scheduler.create_state()
        _UpperCAmelCase = scheduler_state
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.random.PRNGKey(0 )
        _UpperCAmelCase = 50
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        # shard inputs and rng
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = jax.random.split(UpperCAmelCase , UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_45_04_39_45) ) < 1e-3
            assert np.abs((np.abs(UpperCAmelCase , dtype=np.floataa ).sum() - 2_34_76_93.5) ) < 5e-1
    def UpperCamelCase ( self ):
        """Memory-efficient attention must produce near-identical images.

        NOTE(review): `slice_eff` and `slice` in the final assert are never
        bound in this obfuscated copy (both slice assignments went to
        `_UpperCAmelCase`); upstream these are distinct locals — restore.
        """
        _UpperCAmelCase = (
            'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
            ' field, close up, split lighting, cinematic'
        )
        _UpperCAmelCase = jax.device_count()
        _UpperCAmelCase = num_samples * [prompt]
        _UpperCAmelCase = jax.random.split(jax.random.PRNGKey(0 ) , UpperCAmelCase )
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , )
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images.shape == (num_samples, 1, 512, 512, 3)
        _UpperCAmelCase = images[2, 0, 256, 10:17, 1]
        # With memory efficient attention
        _UpperCAmelCase , _UpperCAmelCase = FlaxStableDiffusionPipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=UpperCAmelCase , use_memory_efficient_attention=UpperCAmelCase , )
        _UpperCAmelCase = replicate(UpperCAmelCase )
        _UpperCAmelCase = pipeline.prepare_inputs(UpperCAmelCase )
        _UpperCAmelCase = shard(UpperCAmelCase )
        _UpperCAmelCase = pipeline(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , jit=UpperCAmelCase ).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)
        _UpperCAmelCase = images[2, 0, 256, 10:17, 1]
        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice ).max() < 1e-2
| 39 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
# Module logger, named after this module per library convention.
UpperCAmelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCAmelCase__ = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... 
prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def downscale_height_and_width(height, width, scale_factor=8):
    """Map a pixel height/width onto the MoVQ latent grid.

    The latent grid has ``size // scale_factor**2`` cells per side, rounded up
    so that non-divisible sizes are not truncated, and the result is multiplied
    back by ``scale_factor`` (the resolution the unet operates on).

    Args:
        height: requested output image height in pixels.
        width: requested output image width in pixels.
        scale_factor: MoVQ spatial downscale factor (default 8).

    Returns:
        Tuple ``(new_height, new_width)`` of unet-compatible sizes.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        # Round up so no requested pixels are dropped.
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowercase_( lowercase ):
    """Kandinsky 2.2 ControlNet decoder pipeline.

    Turns CLIP image embeddings (produced by the Kandinsky prior) plus a
    ControlNet ``hint`` tensor into images using:
      - ``unet``:      conditional UNet denoiser
      - ``scheduler``: DDPM noise scheduler
      - ``movq``:      MoVQ latent autoencoder that decodes latents to pixels
    """

    def __init__(self, unet, scheduler, movq):
        super().__init__()
        self.register_modules(
            unet=unet, scheduler=scheduler, movq=movq,
        )
        # Spatial downscale factor of the MoVQ autoencoder (x2 per block).
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents, scaled by the scheduler's init sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload submodules to CPU, moving each to GPU only while it executes (lowest memory)."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''')

        device = torch.device(F"""cuda:{gpu_id}""")

        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models via accelerate hooks; each is moved to GPU on first use."""
        if is_accelerate_available() and is_accelerate_version('''>=''', '''0.17.0.dev0'''):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''')

        device = torch.device(F"""cuda:{gpu_id}""")

        if self.device.type != "cpu":
            self.to('''cpu''', silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            # Chain each hook to the previous one so a single model sits on GPU at a time.
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the unet actually executes on (accounts for accelerate offload hooks)."""
        if not hasattr(self.unet, '''_hf_hook'''):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, '''_hf_hook''')
                and hasattr(module._hf_hook, '''execution_device''')
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(UpperCAmelCase__)
    def __call__(
        self,
        image_embeds,
        negative_image_embeds,
        hint,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        """Run the guided denoising loop and decode the result with MoVQ.

        Args:
            image_embeds / negative_image_embeds: CLIP image embeddings (tensor or list of tensors).
            hint: ControlNet conditioning tensor.
            height, width: requested output size in pixels.
            num_inference_steps: number of scheduler steps.
            guidance_scale: classifier-free guidance weight (> 1.0 enables CFG).
            num_images_per_prompt: images generated per embedding.
            generator: optional torch generator(s) for reproducibility.
            latents: optional pre-sampled initial latents.
            output_type: "pt", "np" or "pil".
            return_dict: return ImagePipelineOutput when True, else a tuple.
        """
        device = self._execution_device

        do_classifier_free_guidance = guidance_scale > 1.0

        # Accept lists of embeddings/hints and concatenate them into batches.
        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)
        if isinstance(hint, list):
            hint = torch.cat(hint, dim=0)

        batch_size = image_embeds.shape[0] * num_images_per_prompt

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            hint = hint.repeat_interleave(num_images_per_prompt, dim=0)

            # Unconditional inputs first, conditional second; split again after the unet.
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=self.unet.dtype, device=device)
            hint = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=device)

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        num_channels_latents = self.movq.config.latent_channels

        height, width = downscale_height_and_width(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            image_embeds.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=None,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                # The unet also predicts a variance; guide only the noise part.
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, '''variance_type''')
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                # Scheduler does not consume the learned variance; drop it.
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            )[0]

        # post-processing: decode latents to pixel space with MoVQ.
        image = self.movq.decode(latents, force_not_quantize=True)['''sample''']

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""")

        if output_type in ["np", "pil"]:
            # [-1, 1] -> [0, 1], then NCHW -> NHWC numpy for PIL conversion.
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

            if output_type == "pil":
                image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 26 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Maps submodule name -> list of public symbols. Filled in below depending on
# which optional backends (tokenizers / torch / tf) are importable, then handed
# to _LazyModule so the heavy submodules are only imported on first access.
_import_structure = {
    "configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
    "tokenization_deberta": ["DebertaTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_deberta_fast"] = ["DebertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers; mirrors _import_structure above.
    from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
    from .tokenization_deberta import DebertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_deberta_fast import DebertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_deberta import (
            DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            DebertaForMaskedLM,
            DebertaForQuestionAnswering,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaModel,
            DebertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_deberta import (
            TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDebertaForMaskedLM,
            TFDebertaForQuestionAnswering,
            TFDebertaForSequenceClassification,
            TFDebertaForTokenClassification,
            TFDebertaModel,
            TFDebertaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 | 1 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
# Load the iris dataset and split it into train/test partitions.
data = datasets.load_iris()
X = np.array(data["data"])  # feature matrix (sepal/petal measurements, cm)
y = np.array(data["target"])  # class index per sample
classes = data["target_names"]  # index -> species name
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(input_a, input_b):
    """Return the Euclidean (L2) distance between two equal-length vectors."""
    return np.linalg.norm(np.array(input_a) - np.array(input_b))
def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training samples.

    Args:
        train_data: iterable of training feature vectors.
        train_target: class index for each training vector.
        classes: sequence mapping class index -> label.
        point: feature vector to classify.
        k: number of neighbours that vote (default 5).

    Returns:
        The label from `classes` chosen by the k nearest neighbours.
    """
    data = zip(train_data, train_target)
    # List of (distance, target) pairs for every training point.
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
    # Classify one unseen iris sample (sepal/petal measurements in cm).
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 149 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCamelCase( ProcessorMixin ):
    """Bundles an OwlViT image processor and a CLIP tokenizer into one processor.

    Handles tokenizing text queries (including per-image lists of queries,
    padded to the longest list), preprocessing images and query images, and
    delegating post-processing/decoding to the wrapped components.
    """

    # ProcessorMixin wiring: which attributes exist and which classes back them.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backwards compatibility with the deprecated `feature_extractor` kwarg.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, query_images=None, padding="max_length", return_tensors="np", **kwargs):
        """Tokenize text queries and/or preprocess (query) images.

        Returns a BatchEncoding with `input_ids`/`attention_mask` and/or
        `pixel_values`/`query_pixel_values`, depending on the provided inputs.
        Raises ValueError if none of text/images/query_images is given.
        """
        if text is None and query_images is None and images is None:
            raise ValueError(
                'You have to specify at least one text or query image or image. All three cannot be none.')

        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]

            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])

                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [' '] * (max_num_queries - len(t))

                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError('Input text should be a string, a list of strings or a nested list of strings')

            # Stack the per-sample encodings into one batch for the chosen backend.
            if return_tensors == "np":
                input_ids = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp

                input_ids = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0)

            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0)

            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf

                input_ids = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0)

            else:
                raise ValueError('Target return tensor type could not be returned')

            encoding = BatchEncoding()
            encoding['input_ids'] = input_ids
            encoding['attention_mask'] = attention_mask

        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding['query_pixel_values'] = query_pixel_values

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def post_process(self, *args, **kwargs):
        """Delegate to the image processor's `post_process`."""
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        """Delegate to the image processor's `post_process_object_detection`."""
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        """Delegate to the image processor's `post_process_image_guided_detection`."""
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Delegate to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.',
            FutureWarning,
        )
        return self.image_processor
| 166 | 0 |
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __snake_case( SchedulerCommonTest ):
    """Unit tests for EulerDiscreteScheduler (config sweeps plus full denoising loops)."""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Baseline scheduler config; individual tests override fields via kwargs."""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """Deterministic full denoising loop; checks sum/mean against reference values."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same loop with v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """Full loop with timesteps placed on the test device."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """Full loop on device with the Karras sigma schedule enabled."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
# Module-level logger used by the eval helpers below.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    """Guess the model family from its name/path.

    Returns "rag_token", "rag_sequence", "bart", or None when nothing matches.
    """
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Return the best `metric_fn(prediction, gt)` score over all reference answers."""
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    """Compute and log EM / F1 of predictions against gold answers.

    `args.gold_data_mode` selects the gold file format:
      "qa"  - TSV rows of question \\t python-literal list of answers
      "ans" - one gold answer string per line
    """
    hypos = [line.strip() for line in open(preds_path, """r""").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="""\t""", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, """r""").readlines()]
        answers = [[reference] for reference in references]

    fa = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        fa += metric_max_over_ground_truths(fa_score, prediction, ground_truths)

    em = 100.0 * em / total
    fa = 100.0 * fa / total

    logger.info(f'F1: {fa:.2f}')
    logger.info(f'EM: {em:.2f}')
def get_precision_at_k(args, preds_path, gold_data_path):
    """Compute and log precision@k of retrieved provenance titles.

    Both files hold tab-separated title lists, one line per question; only the
    first `args.k` hypothesis titles count.
    """
    k = args.k
    hypos = [line.strip() for line in open(preds_path, """r""").readlines()]
    references = [line.strip() for line in open(gold_data_path, """r""").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("""\t""")[:k])
        ref_provenance = set(reference.split("""\t"""))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f'Precision@{k}: {em: .2f}')
def evaluate_batch_retrieval(args, rag_model, questions):
    """Retrieve docs for a batch of questions; return one tab-joined title string per question."""

    def strip_title(title):
        # Gold provenance titles are unquoted; normalize retrieved ones to match.
        if title.startswith("""\""""):
            title = title[1:]
        if title.endswith("""\""""):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="""pt""",
        padding=True,
        truncation=True,
    )["""input_ids"""].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="""pt""",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["""title"""]]
        provenance_strings.append("""\t""".join(provenance))
    return provenance_strings
def evaluate_batch_eae(args, rag_model, questions):
    """Generate an answer for each question with beam search; return decoded strings."""
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="""pt""", padding=True, truncation=True)

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],  # BART likes to repeat BOS tokens, don't allow it to generate more than one
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("""Q: {} - A: {}""".format(q, a))

        return answers
def get_args():
    """Build the CLI argument parser for RAG evaluation and return parsed args.

    Also attaches `args.device` (cuda when available, else cpu).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--model_type""",
        choices=["""rag_sequence""", """rag_token""", """bart"""],
        type=str,
        help=(
            """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"""
            """ model_name_or_path"""
        ),
    )
    parser.add_argument(
        """--index_name""",
        default=None,
        choices=["""exact""", """compressed""", """legacy"""],
        type=str,
        help="""RAG model retriever type""",
    )
    parser.add_argument(
        """--index_path""",
        default=None,
        type=str,
        help="""Path to the retrieval index""",
    )
    parser.add_argument("""--n_docs""", default=5, type=int, help="""Number of retrieved docs""")
    parser.add_argument(
        """--model_name_or_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""",
    )
    parser.add_argument(
        """--eval_mode""",
        choices=["""e2e""", """retrieval"""],
        default="""e2e""",
        type=str,
        help=(
            """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"""
            """ precision@k."""
        ),
    )
    parser.add_argument("""--k""", default=1, type=int, help="""k for the precision@k calculation""")
    parser.add_argument(
        """--evaluation_set""",
        default=None,
        type=str,
        required=True,
        help="""Path to a file containing evaluation samples""",
    )
    parser.add_argument(
        """--gold_data_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to a tab-separated file with gold samples""",
    )
    parser.add_argument(
        """--gold_data_mode""",
        default="""qa""",
        type=str,
        choices=["""qa""", """ans"""],
        help=(
            """Format of the gold data file"""
            """qa - a single line in the following format: question [tab] answer_list"""
            """ans - a single line of the gold file contains the expected answer string"""
        ),
    )
    parser.add_argument(
        """--predictions_path""",
        type=str,
        default="""predictions.txt""",
        help="""Name of the predictions file, to be stored in the checkpoints directory""",
    )
    parser.add_argument(
        """--eval_all_checkpoints""",
        action="""store_true""",
        help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""",
    )
    parser.add_argument(
        """--eval_batch_size""",
        default=8,
        type=int,
        help="""Batch size per GPU/CPU for evaluation.""",
    )
    parser.add_argument(
        """--recalculate""",
        help="""Recalculate predictions even if the prediction file exists""",
        action="""store_true""",
    )
    parser.add_argument(
        """--num_beams""",
        default=4,
        type=int,
        help="""Number of beams to be used when generating answers""",
    )
    parser.add_argument("""--min_length""", default=1, type=int, help="""Min length of the generated answers""")
    parser.add_argument("""--max_length""", default=50, type=int, help="""Max length of the generated answers""")

    parser.add_argument(
        """--print_predictions""",
        action="""store_true""",
        help="""If True, prints predictions while evaluating.""",
    )
    parser.add_argument(
        """--print_docs""",
        action="""store_true""",
        help="""If True, prints docs retried while generating.""",
    )
    args = parser.parse_args()
    args.device = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
    return args
def main(args):
    """Evaluate one or more checkpoints: generate (or reuse) predictions, then score them."""
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("""rag"""):
        model_class = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration
        model_kwargs["""n_docs"""] = args.n_docs
        if args.index_name is not None:
            model_kwargs["""index_name"""] = args.index_name
        if args.index_path is not None:
            model_kwargs["""index_path"""] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("""Evaluate the following checkpoints: %s""", checkpoints)

    score_fn = get_scores if args.eval_mode == """e2e""" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            # Reuse existing predictions unless --recalculate was passed.
            logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("""***** Running evaluation for {} *****""".format(checkpoint))
        logger.info("""  Batch size = %d""", args.eval_batch_size)
        logger.info("""  Predictions will be stored under {}""".format(args.predictions_path))

        if args.model_type.startswith("""rag"""):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, """r""") as eval_file, open(args.predictions_path, """w""") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("""\n""".join(answers) + """\n""")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                # Flush the final partial batch.
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("""\n""".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)
if __name__ == "__main__":
    # Parse CLI args and run the evaluation loop.
    args = get_args()
    main(args)
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def lowerCAmelCase__(emb_1, emb_2, eps=1E-12):
    """Pairwise cosine similarity between two batches of embeddings.

    Each row of ``emb_1`` (shape ``(n, d)``) and ``emb_2`` (shape ``(m, d)``) is
    L2-normalised (norms clipped to ``eps`` to avoid division by zero), and the
    matrix of dot products is returned, shape ``(n, m)``.

    BUG FIX: the previous signature declared three parameters with the same
    name (a SyntaxError) and normalised the first input twice; the second line
    must normalise the second embedding batch.
    """
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class lowerCamelCase__ ( nn.Module):
    """Flax safety-checker head: flags NSFW concepts in CLIP image embeddings.

    NOTE(review): identifiers in this class look machine-mangled — the two
    ``snake_case_`` fields shadow each other (``42`` is not a usable type), and
    ``setup`` binds its submodules/params to throwaway locals while ``__call__``
    reads ``self.vision_model`` / ``self.visual_projection`` /
    ``self.special_care_embeds`` etc. Verify against the upstream module
    before relying on behaviour.
    """
    # presumably the module's CLIP config field — TODO confirm
    snake_case_ =42
    # presumably the compute dtype (float32) — TODO confirm
    snake_case_ =jnp.floataa

    def lowerCAmelCase__ (self ) -> Any:
        """Build the vision tower, projection layer and the learned
        concept/special-care embedding parameters (17 and 3 concepts)."""
        lowerCAmelCase__ : Any = FlaxCLIPVisionModule(self.config.vision_config )
        lowerCAmelCase__ : Dict = nn.Dense(self.config.projection_dim ,use_bias=__lowerCamelCase ,dtype=self.dtype )
        lowerCAmelCase__ : Union[str, Any] = self.param('''concept_embeds''' ,jax.nn.initializers.ones ,(17, self.config.projection_dim) )
        lowerCAmelCase__ : Tuple = self.param(
            '''special_care_embeds''' ,jax.nn.initializers.ones ,(3, self.config.projection_dim) )
        lowerCAmelCase__ : Tuple = self.param('''concept_embeds_weights''' ,jax.nn.initializers.ones ,(17,) )
        lowerCAmelCase__ : Tuple = self.param('''special_care_embeds_weights''' ,jax.nn.initializers.ones ,(3,) )

    def __call__(self ,__lowerCamelCase ) -> List[str]:
        """Score pooled image embeddings against the concept banks; returns a
        per-image boolean (any concept score above its threshold)."""
        lowerCAmelCase__ : int = self.vision_model(__lowerCamelCase )[1]
        lowerCAmelCase__ : int = self.visual_projection(__lowerCamelCase )
        # cosine similarity against the "special care" and regular concept banks
        lowerCAmelCase__ : Union[str, Any] = jax_cosine_distance(__lowerCamelCase ,self.special_care_embeds )
        lowerCAmelCase__ : Optional[int] = jax_cosine_distance(__lowerCamelCase ,self.concept_embeds )
        # increase this value to create a stronger `nfsw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        lowerCAmelCase__ : str = 0.0
        lowerCAmelCase__ : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        lowerCAmelCase__ : str = jnp.round(__lowerCamelCase ,3 )
        lowerCAmelCase__ : Optional[int] = jnp.any(special_scores > 0 ,axis=1 ,keepdims=__lowerCamelCase )
        # Use a lower threshold if an image has any special care concept
        lowerCAmelCase__ : List[str] = is_special_care * 0.01
        lowerCAmelCase__ : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        lowerCAmelCase__ : Union[str, Any] = jnp.round(__lowerCamelCase ,3 )
        lowerCAmelCase__ : Any = jnp.any(concept_scores > 0 ,axis=1 )
        return has_nsfw_concepts
class lowerCamelCase__ ( lowerCamelCase__):
    """Pretrained-model wrapper around the safety-checker Flax module.

    NOTE(review): this class inherits from its own (mangled) name — upstream it
    presumably derived from ``FlaxPreTrainedModel`` (imported above); the
    ``__init__`` signature repeats one mangled parameter name (invalid Python)
    and references ``input_shape`` / ``_do_init`` that the signature no longer
    declares. Verify against the upstream class.
    """
    # config class, main input name, and the inner module class
    snake_case_ =CLIPConfig
    snake_case_ ="""clip_input"""
    snake_case_ =FlaxStableDiffusionSafetyCheckerModule

    def __init__(self ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = 0 ,__lowerCamelCase = jnp.floataa ,__lowerCamelCase = True ,**__lowerCamelCase ,) -> Union[str, Any]:
        """Instantiate the inner module and defer to the base initializer.

        Defaults to a ``(1, 224, 224, 3)`` NHWC input shape when none is given.
        """
        if input_shape is None:
            lowerCAmelCase__ : Optional[Any] = (1, 2_24, 2_24, 3)
        lowerCAmelCase__ : List[str] = self.module_class(config=__lowerCamelCase ,dtype=__lowerCamelCase ,**__lowerCamelCase )
        super().__init__(__lowerCamelCase ,__lowerCamelCase ,input_shape=__lowerCamelCase ,seed=__lowerCamelCase ,dtype=__lowerCamelCase ,_do_init=_do_init )

    def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ) -> FrozenDict:
        """Initialise module parameters from a PRNG key and a random pixel-values
        tensor; returns the ``params`` FrozenDict."""
        lowerCAmelCase__ : Optional[int] = jax.random.normal(__lowerCamelCase ,__lowerCamelCase )
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = jax.random.split(__lowerCamelCase )
        lowerCAmelCase__ : Optional[int] = {'''params''': params_rng, '''dropout''': dropout_rng}
        lowerCAmelCase__ : Dict = self.module.init(__lowerCamelCase ,__lowerCamelCase )['''params''']
        return random_params

    def __call__(self ,__lowerCamelCase ,__lowerCamelCase = None ,) -> Tuple:
        """Run the safety check on pixel values given in NCHW order (transposed
        to NHWC before applying the module)."""
        lowerCAmelCase__ : Any = jnp.transpose(__lowerCamelCase ,(0, 2, 3, 1) )
        return self.module.apply(
            {'''params''': params or self.params} ,jnp.array(__lowerCamelCase ,dtype=jnp.floataa ) ,rngs={} ,)
| 129 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__snake_case : Dict = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class lowerCamelCase__(Pipeline):
    """Zero-shot audio classification pipeline.

    Scores a mono audio clip (URL, file path, raw bytes, or 1-D numpy array)
    against candidate text labels with an audio/text dual-encoder model that
    exposes ``logits_per_audio``. PyTorch backend only.

    FIXES vs. previous revision: the decorator/base class now use the imported
    ``PIPELINE_INIT_ARGS`` / ``Pipeline`` instead of the class's own undefined
    name; ``__call__`` no longer declares two parameters with the same name
    (SyntaxError); the four ``Pipeline`` hook methods get their contractual
    names back (all four were previously given one identical name, so later
    definitions shadowed earlier ones); ``preprocess`` stores its results in
    the inputs dict (matching what ``_forward`` pops) instead of dead locals;
    the sort key lambda now binds the name its body uses.
    """

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios, **kwargs):
        """Assign labels to the audio(s) passed as inputs; extra kwargs are
        routed through ``_sanitize_parameters``."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # All user-facing kwargs go to preprocess; forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        """Load/decode the audio, extract features, and tokenize one hypothesis
        per candidate label."""
        if isinstance(audio, str):
            if audio.startswith("http://") or audio.startswith("https://"):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, "rb") as f:
                    audio = f.read()
        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)
        if not isinstance(audio, np.ndarray):
            raise ValueError("We expect a numpy ndarray as input")
        if len(audio.shape) != 1:
            raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline")
        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors="pt"
        )
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs, **model_inputs)
        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Softmax the per-label logits and return [{score, label}, ...] sorted
        by descending score."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError("`tf` framework not supported.")
        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 129 | 1 |
'''simple docstring'''
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# True when the optional ``s3fs`` dependency is importable.
# NOTE(review): the assignment target looks mangled — the guard below reads
# ``_has_safs`` while this binds a different name; verify upstream.
lowerCamelCase_ = importlib.util.find_spec('''s3fs''') is not None
if _has_safs:
    from .safilesystem import SaFileSystem # noqa: F401

# Compression filesystems shipped with the library.
# NOTE(review): same mangling concern — the registration loop below iterates
# ``COMPRESSION_FILESYSTEMS`` while this list is bound to a mangled name.
lowerCamelCase_ = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
# Warn (then clobber) if another implementation already claimed the protocol.
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def __lowercase(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. ``s3://``) from a remote dataset path.

    ``"s3://bucket/dir"`` -> ``"bucket/dir"``; paths with no ``://`` are
    returned unchanged.

    BUG FIX: the previous revision named its parameter after the function
    itself (the body's ``dataset_path`` was unbound) and dropped the split
    result into a dead local, so the input was always returned unchanged.
    """
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def __lowercase(fs) -> bool:
    """Return True when ``fs`` is a remote (non-local) fsspec filesystem.

    ``None`` and filesystems whose ``protocol`` is ``"file"`` count as local.

    BUG FIX: the previous revision named its parameter after the function
    itself, so the body's ``fs`` was unbound and every call raised NameError.
    """
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def __lowercase(fs, src: str, dst: str) -> None:
    """Move ``src`` to ``dst`` on the given fsspec filesystem.

    BUG FIX: the previous signature declared three parameters with the same
    name (a SyntaxError); the body's ``fs`` usage fixes the intended names.

    NOTE(review): ``recursive=True`` restored from a mangled keyword value —
    confirm against the upstream ``rename`` helper.
    """
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def __lowercase() -> None:
    """Reset fsspec's module-level asyncio state (e.g. after ``os.fork``).

    Newer fsspec releases expose ``reset_lock`` which clears the shared event
    loop, IO thread and lock; on older releases we clear those globals by hand.

    BUG FIX: the previous revision assigned the ``None``/``threading.Lock()``
    values to a throwaway local, so nothing in ``fsspec.asyn`` was actually
    reset; the assignments now target the module globals the else-branch was
    written to clear. (Reconstructed from the surviving control flow — confirm
    against upstream ``datasets.filesystems._reset_fsspec_lock``.)
    """
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
| 371 |
'''simple docstring'''

# Installation cell prepended to every notebook generated from the docs.
lowerCamelCase_ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

# Cells injected at the top of each converted notebook.
# NOTE(review): ``INSTALL_CONTENT`` is read here but the string above is bound
# to a mangled name — verify the intended constant names upstream.
lowerCamelCase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
# Placeholder -> concrete-class substitutions applied to doc templates.
lowerCamelCase_ = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
| 174 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
# TensorFlow (plus its OOM error type and the TF benchmark args) is only
# imported when the backend is actually installed.
if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.framework.errors_impl import ResourceExhaustedError
    from .benchmark_args_tf import TensorFlowBenchmarkArguments

# py3nvml is optional; used only to read GPU memory via NVML.
if is_pyanvml_available():
    import pyanvml.pyanvml as nvml

__lowerCamelCase : Any = logging.get_logger(__name__)
def _snake_case(do_eager_mode: bool, use_xla: bool):
    """Decorator factory that wraps a benchmark callable to run either eagerly
    or as a (optionally XLA-compiled) ``tf.function``.

    Raises ValueError if eager mode is requested together with XLA, since XLA
    compilation requires graph mode.

    BUG FIX: the previous signature declared both boolean parameters with the
    same name (a SyntaxError); the body already reads ``do_eager_mode`` and
    ``use_xla``, which fixes the intended names.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`."
                )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def _snake_case(batch_size: int, sequence_length: int, vocab_size: int):
    """Create a random batch of token ids for benchmarking.

    Returns a ``tf.Tensor`` of shape ``(batch_size, sequence_length)`` with
    values drawn uniformly from ``[0, vocab_size - 1]``.

    BUG FIX: the previous signature declared three parameters with the same
    name (a SyntaxError); the body already reads ``batch_size`` /
    ``sequence_length`` / ``vocab_size``, which fixes the intended names.
    ``tf.intaa`` (a mangled identifier that does not exist in TensorFlow) is
    restored to ``tf.int32``.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class a__ ( A__ ):
    """TensorFlow benchmark runner: measures inference/training speed and
    memory for the configured models inside a ``tf.distribute`` strategy.

    NOTE(review): identifiers here look machine-mangled — the three ``A``
    fields shadow each other (upstream presumably ``args`` / ``configs`` /
    ``framework``), every method is named ``__UpperCamelCase`` so each
    definition overwrites the previous one, method signatures repeat the
    parameter name ``_A`` (invalid Python), and bodies read names
    (``strategy``, ``config``, ``model``, ``_inference``, ``_train``, …) that
    are no longer bound. Call sites reference ``self._prepare_inference_func``
    etc. Verify against the upstream ``TensorFlowBenchmark`` before use.
    """

    A = 42
    A = 42
    A = "TensorFlow"

    @property
    def __UpperCamelCase ( self : str ):
        """The TensorFlow version the benchmark runs against."""
        return tf.__version__

    def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
        """Measure inference speed for one (model, batch size, seq length)."""
        # A distribution strategy must be set up before any TF work happens.
        SCREAMING_SNAKE_CASE_ : List[str] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        SCREAMING_SNAKE_CASE_ : Any = self._prepare_inference_func(_A,_A,_A )
        return self._measure_speed(_inference )

    def __UpperCamelCase ( self : str,_A : str,_A : int,_A : int ):
        """Measure training speed for one (model, batch size, seq length)."""
        SCREAMING_SNAKE_CASE_ : List[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        SCREAMING_SNAKE_CASE_ : List[Any] = self._prepare_train_func(_A,_A,_A )
        return self._measure_speed(_train )

    def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
        """Measure inference memory; on GPU, enable memory growth first so the
        measurement reflects actual usage rather than eager full allocation."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
        SCREAMING_SNAKE_CASE_ : Tuple = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        SCREAMING_SNAKE_CASE_ : str = self._prepare_inference_func(_A,_A,_A )
        return self._measure_memory(_inference )

    def __UpperCamelCase ( self : Optional[Any],_A : str,_A : int,_A : int ):
        """Measure training memory (same GPU memory-growth caveat as above)."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx],_A )
        SCREAMING_SNAKE_CASE_ : Any = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        SCREAMING_SNAKE_CASE_ : str = self._prepare_train_func(_A,_A,_A )
        return self._measure_memory(_train )

    def __UpperCamelCase ( self : int,_A : str,_A : int,_A : int ):
        """Build the model and return a zero-arg forward-pass closure, compiled
        per the eager/XLA settings."""
        SCREAMING_SNAKE_CASE_ : Optional[int] = self.config_dict[model_name]
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        # Prefer the concrete architecture class listed in the config;
        # otherwise fall back to the generic auto-mapping.
        SCREAMING_SNAKE_CASE_ : Dict = (
            hasattr(_A,"architectures" )
            and isinstance(config.architectures,_A )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                SCREAMING_SNAKE_CASE_ : List[Any] = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                SCREAMING_SNAKE_CASE_ : List[Any] = __import__("transformers",fromlist=[model_class] )
                SCREAMING_SNAKE_CASE_ : Tuple = getattr(_A,_A )
                SCREAMING_SNAKE_CASE_ : List[str] = model_cls(_A )
            except ImportError:
                raise ImportError(
                    F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            SCREAMING_SNAKE_CASE_ : Optional[int] = TF_MODEL_MAPPING[config.__class__](_A )
        # encoder-decoder has vocab size saved differently
        SCREAMING_SNAKE_CASE_ : Tuple = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
        SCREAMING_SNAKE_CASE_ : str = random_input_ids(_A,_A,_A )

        @run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
        def encoder_decoder_forward():
            return model(_A,decoder_input_ids=_A,training=_A )

        @run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
        def encoder_forward():
            return model(_A,training=_A )

        SCREAMING_SNAKE_CASE_ : Tuple = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def __UpperCamelCase ( self : Dict,_A : str,_A : int,_A : int ):
        """Build the model and return a zero-arg closure computing gradients of
        one training step (training requires graph mode)."""
        SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )
        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )
        SCREAMING_SNAKE_CASE_ : Dict = (
            hasattr(_A,"architectures" )
            and isinstance(config.architectures,_A )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                SCREAMING_SNAKE_CASE_ : Dict = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model
                SCREAMING_SNAKE_CASE_ : Tuple = __import__("transformers",fromlist=[model_class] )
                SCREAMING_SNAKE_CASE_ : Optional[int] = getattr(_A,_A )
                SCREAMING_SNAKE_CASE_ : List[Any] = model_cls(_A )
            except ImportError:
                raise ImportError(
                    F'{model_class} does not exist. If you just want to test the pretrained model, you might want to'
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            SCREAMING_SNAKE_CASE_ : Any = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](_A )
        # encoder-decoder has vocab size saved differently
        SCREAMING_SNAKE_CASE_ : Any = config.vocab_size if hasattr(_A,"vocab_size" ) else config.encoder.vocab_size
        SCREAMING_SNAKE_CASE_ : int = random_input_ids(_A,_A,_A )

        @run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
        def encoder_decoder_train():
            SCREAMING_SNAKE_CASE_ : Dict = model(_A,decoder_input_ids=_A,labels=_A,training=_A )[0]
            SCREAMING_SNAKE_CASE_ : Optional[Any] = tf.gradients(_A,model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode,self.args.use_xla )
        def encoder_train():
            SCREAMING_SNAKE_CASE_ : Tuple = model(_A,labels=_A,training=_A )[0]
            SCREAMING_SNAKE_CASE_ : Tuple = tf.gradients(_A,model.trainable_variables )
            return gradients

        SCREAMING_SNAKE_CASE_ : List[Any] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def __UpperCamelCase ( self : int,_A : Any ):
        """Time the closure with timeit; returns best-of-``repeat`` seconds per
        single run (10 runs per repeat)."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(_A,repeat=1,number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                SCREAMING_SNAKE_CASE_ : Optional[int] = timeit.repeat(
                    _A,repeat=self.args.repeat,number=10,)
                return min(_A ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(F'Doesn\'t fit on GPU. {e}' )

    def __UpperCamelCase ( self : List[str],_A : Callable[[], None] ):
        """Run the closure and report peak memory (NVML on GPU, RSS sampling on
        CPU, optional line-by-line tracing); returns ``(memory, summary)``."""
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    SCREAMING_SNAKE_CASE_ : List[str] = start_memory_tracing("transformers" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        SCREAMING_SNAKE_CASE_ : Optional[int] = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        SCREAMING_SNAKE_CASE_ : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        SCREAMING_SNAKE_CASE_ : Union[str, Any] = nvml.nvmlDeviceGetMemoryInfo(_A )
                        SCREAMING_SNAKE_CASE_ : Any = meminfo.used
                        SCREAMING_SNAKE_CASE_ : List[Any] = Memory(_A )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
                    else:
                        SCREAMING_SNAKE_CASE_ : Optional[Any] = measure_peak_memory_cpu(_A )
                        SCREAMING_SNAKE_CASE_ : str = Memory(_A ) if isinstance(_A,_A ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = stop_memory_tracing(_A )
                    if memory is None:
                        SCREAMING_SNAKE_CASE_ : Optional[Any] = summary.total
                else:
                    SCREAMING_SNAKE_CASE_ : Optional[int] = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(F'Doesn\'t fit on GPU. {e}' )
                return "N/A", None
from math import factorial, radians
def _snake_case(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle given in degrees via its Maclaurin series.

    The angle is first reduced modulo 360, then converted to radians, and the
    series ``x - x^3/3! + x^5/5! - ...`` is summed for ``accuracy`` extra
    terms. The result is rounded to ``rounded_values_count`` decimal places.

    BUG FIX: the previous signature declared all three parameters with the
    same name (a SyntaxError); the body already reads ``angle_in_degrees``,
    which together with the defaults fixes the intended names.
    """
    # Reduce the angle into one period so the series converges quickly.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__('''doctest''').testmod()
| 18 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class _lowerCamelCase ( unittest.TestCase ):
    """Fast (tiny-model) smoke test for the Karras-VE unconditional pipeline.

    NOTE(review): intermediate results are bound to mangled names
    (``UpperCamelCase``) and call arguments use ``__a`` — these look broken by
    the renaming pass; verify against the upstream diffusers test.
    """

    @property
    def snake_case_ (self ) -> Optional[int]:
        """Build a small seeded UNet2DModel fixture.

        NOTE(review): the constructed model is bound to a mangled local while
        the ``return`` reads ``model``.
        """
        torch.manual_seed(0 )
        UpperCamelCase = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
        return model

    def snake_case_ (self ) -> Any:
        """Run two denoising steps twice (dict and tuple return paths) from the
        same seed and compare a corner slice against hard-coded values."""
        UpperCamelCase = self.dummy_uncond_unet
        UpperCamelCase = KarrasVeScheduler()
        UpperCamelCase = KarrasVePipeline(unet=__a , scheduler=__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = pipe(num_inference_steps=2 , generator=__a , output_type="numpy" ).images
        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = pipe(num_inference_steps=2 , generator=__a , output_type="numpy" , return_dict=__a )[0]
        UpperCamelCase = image[0, -3:, -3:, -1]
        UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
    """Slow integration test: full 256x256 CelebA-HQ Karras-VE generation from
    a pretrained checkpoint.

    NOTE(review): mangled local names (``UpperCamelCase``/``__a``) as in the
    fast test above — verify against the upstream diffusers test.
    """

    def snake_case_ (self ) -> Union[str, Any]:
        """Generate one seeded sample with 20 steps and compare a corner slice
        against reference values."""
        UpperCamelCase = "google/ncsnpp-celebahq-256"
        UpperCamelCase = UNetaDModel.from_pretrained(__a )
        UpperCamelCase = KarrasVeScheduler()
        UpperCamelCase = KarrasVePipeline(unet=__a , scheduler=__a )
        pipe.to(__a )
        pipe.set_progress_bar_config(disable=__a )
        UpperCamelCase = torch.manual_seed(0 )
        UpperCamelCase = pipe(num_inference_steps=20 , generator=__a , output_type="numpy" ).images
        UpperCamelCase = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_56, 2_56, 3)
        UpperCamelCase = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 359 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-module import structure: configuration is always importable, modeling
# objects only when torch is installed.
lowerCAmelCase__ = {
    '''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): upstream this would extend ``_import_structure``; here both
    # the dict above and this list are bound to the same mangled name — verify.
    lowerCAmelCase__ = [
        '''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TimesformerModel''',
        '''TimesformerForVideoClassification''',
        '''TimesformerPreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports instead of the lazy module.
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    # At runtime, replace this module object with a lazy loader that imports
    # submodules on first attribute access.
    lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 244 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase )
class snake_case ( lowercase ):
    """Task template for text classification: one ``text`` string column and
    one ``labels`` ClassLabel column.

    NOTE(review): identifiers look machine-mangled — both methods are named
    ``snake_case`` (the property shadows the aligner), the five class fields
    share one name, and ``frozen=lowercase`` passes a class object where a
    bool is expected (truthy, so effectively ``frozen=True``). Verify against
    the upstream ``TextClassification`` template.
    """

    # task name (kept in serialized dicts even when default), input/label
    # schemas, and the default column names
    _lowerCamelCase = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    _lowerCamelCase = Features({"text": Value("string" )} )
    _lowerCamelCase = Features({"labels": ClassLabel} )
    _lowerCamelCase = "text"
    _lowerCamelCase = "labels"

    def snake_case ( self , UpperCamelCase ):
        """Return a copy of this template whose label schema matches the given
        dataset features; the label column must exist and be a ClassLabel.

        NOTE(review): the body reads ``features`` though the parameter name is
        mangled, and the copied values are bound to throwaway locals before
        ``task_template`` is returned — verify upstream.
        """
        if self.label_column not in features:
            raise ValueError(f'''Column {self.label_column} is not present in features.''' )
        if not isinstance(features[self.label_column] , UpperCamelCase ):
            raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
        lowerCamelCase_ = copy.deepcopy(self )
        lowerCamelCase_ = self.label_schema.copy()
        lowerCamelCase_ = features[self.label_column]
        lowerCamelCase_ = label_schema
        return task_template

    @property
    def snake_case ( self ):
        """Mapping from dataset column names to the template's standard names."""
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 55 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
# Directory of this test file, used to locate the example scripts.
# NOTE(review): the ExtendSysPath f-string reads ``bindir`` while the value is
# bound to a mangled name — verify the intended constant names upstream.
a_ : Dict = os.path.abspath(os.path.dirname(__file__))
# Make the translation example importable so the tests can drive its main().
with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''):
    from run_translation import main # noqa

set_seed(42)
# Small seq2seq checkpoints exercised by the tests below.
a_ : int = """sshleifer/student_marian_en_ro_6_1"""
a_ : str = """sshleifer/tiny-mbart"""
@require_torch
class snake_case ( lowercase ):
"""simple docstring"""
def snake_case ( self , UpperCamelCase=False , UpperCamelCase=None , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , UpperCamelCase=True , ):
"""simple docstring"""
lowerCamelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=UpperCamelCase , num_train_epochs=1 , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , predict_with_generate=UpperCamelCase , do_train=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , )
lowerCamelCase_ = TrainerState.load_from_json(os.path.join(UpperCamelCase , "trainer_state.json" ) ).log_history
if not do_eval:
return
lowerCamelCase_ = [log for log in logs if "eval_loss" in log.keys()]
lowerCamelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
lowerCamelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"] , UpperCamelCase )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase )
@require_torch_multi_gpu
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=UpperCamelCase )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def snake_case ( self ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=UpperCamelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=UpperCamelCase )
@require_apex
@require_torch_gpu
def snake_case ( self ):
"""simple docstring"""
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=UpperCamelCase , extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
lowerCamelCase_ = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
lowerCamelCase_ = experiments[experiment_id]
lowerCamelCase_ = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
lowerCamelCase_ = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**UpperCamelCase , extra_args_str=data["extra_args_str"] )
lowerCamelCase_ = len(re.findall(UpperCamelCase , cl.err ) )
self.assertEqual(UpperCamelCase , data["n_matches"] )
@slow
def snake_case ( self ):
    """End-to-end translation fine-tune: eval_loss must improve over training,
    an ``eval_bleu`` metric must be produced, and ``do_predict`` must write the
    generations and metrics files into the output directory.
    """
    # NOTE(review): assignments are mangled to ``lowerCamelCase_`` while later
    # lines read ``logs``/``eval_metrics``/``first_step_stats``/``last_step_stats``/
    # ``contents``; ``UpperCamelCase`` is also used as model name / output dir /
    # float type without being bound here — restore the original names before
    # relying on this test.
    lowerCamelCase_ = self.run_trainer(
        eval_steps=2 , max_len=128 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=UpperCamelCase , )
    # Check metrics
    lowerCamelCase_ = TrainerState.load_from_json(os.path.join(UpperCamelCase , "trainer_state.json" ) ).log_history
    lowerCamelCase_ = [log for log in logs if "eval_loss" in log.keys()]
    lowerCamelCase_ = eval_metrics[0]
    lowerCamelCase_ = eval_metrics[-1]
    assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
    assert isinstance(last_step_stats["eval_bleu"] , UpperCamelCase )
    # test if do_predict saves generations and metrics
    lowerCamelCase_ = os.listdir(UpperCamelCase )
    lowerCamelCase_ = {os.path.basename(UpperCamelCase ) for p in contents}
    assert "generated_predictions.txt" in contents
    assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def snake_case ( self ):
    """Train once with adamw_torch and once with adamw_bnb_8bit and compare GPU
    memory deltas and final loss: BNB should save roughly 150MB of optimizer
    memory (>= 120MB asserted) while producing an identical training loss.
    """
    from transformers.training_args import OptimizerNames

    def train_and_return_metrics(UpperCamelCase ) -> Tuple[int, float]:
        # Returns (gpu_peak_mem_mb, gpu_alloc_mem_mb, train_loss) for one run.
        # NOTE(review): mangled names — later lines read ``logs`` (and the
        # trainer's output dir) that are never bound under those names here.
        lowerCamelCase_ = "--skip_memory_metrics 0"
        lowerCamelCase_ = self.run_trainer(
            max_len=128 , model_name=UpperCamelCase , learning_rate=3e-4 , num_train_epochs=1 , optim=UpperCamelCase , distributed=UpperCamelCase , extra_args_str=UpperCamelCase , do_eval=UpperCamelCase , do_predict=UpperCamelCase , n_gpus_to_use=1 , )
        # Check metrics
        lowerCamelCase_ = TrainerState.load_from_json(Path(UpperCamelCase , "trainer_state.json" ) ).log_history
        lowerCamelCase_ = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
        lowerCamelCase_ = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
        lowerCamelCase_ = logs[0]["train_loss"]
        return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

    lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
    lowerCamelCase_ ,lowerCamelCase_ ,lowerCamelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
    lowerCamelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
    lowerCamelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
    lowerCamelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
    lowerCamelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
    # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
    # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
    # in 2 bytes and the diff in optim memory usage is derived as so:
    #
    # - normal 25*8=~200MB (8 bytes per param)
    # - bnb 25*2= ~50MB (2 bytes per param)
    #
    # Thus we should expect ~150MB total memory saved.
    #
    # Peak memory should be the same - the total should be different by about that same margin
    #
    # After leaving a small margin to accommodate for differences between gpus let's check
    # that we have at least 120MB in savings
    lowerCamelCase_ = 120
    # uncomment the following if this test starts failing - requires py38 for a new print feature
    # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
    # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
    # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
    # print(f"{gpu_alloc_mem_diff=}MB")
    # print(f"{gpu_peak_mem_diff=}MB")
    # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
    # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
    self.assertGreater(
        UpperCamelCase , UpperCamelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
        f''' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'''
        f''' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB''' , )
    self.assertGreater(
        UpperCamelCase , UpperCamelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
        f''' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'''
        f''' gpu_total_mem_bnb={gpu_total_mem_bnb}MB''' , )
    self.assertEqual(
        UpperCamelCase , UpperCamelCase , f'''loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}''' )
def snake_case ( self , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = 3e-3 , UpperCamelCase = "adafactor" , UpperCamelCase = False , UpperCamelCase = None , UpperCamelCase = 0 , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = True , UpperCamelCase = None , ):
    """Assemble the ``run_translation.py`` CLI arguments and execute the run —
    either as a subprocess through ``torch.distributed.run`` (distributed) or
    in-process with a patched ``sys.argv`` — returning the output directory.

    NOTE(review): the signature above declares the same parameter name
    repeatedly, which is a SyntaxError in Python, and the body reads
    ``model_name``/``learning_rate``/``optim``/``do_train`` etc. that are never
    bound — the original parameter names were mangled; restore them.
    """
    lowerCamelCase_ = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
    lowerCamelCase_ = self.get_auto_remove_tmp_dir()
    # NOTE(review): "73,348" is not a valid integer for argparse; this value
    # looks corrupted (upstream interpolated a max_len variable here) — confirm.
    lowerCamelCase_ = f'''
        --model_name_or_path {model_name}
        --train_file {data_dir}/train.json
        --validation_file {data_dir}/val.json
        --test_file {data_dir}/test.json
        --output_dir {output_dir}
        --overwrite_output_dir
        --max_train_samples 8
        --max_source_length 73,348
        --max_target_length 73,348
        --do_train
        --num_train_epochs {str(UpperCamelCase )}
        --per_device_train_batch_size 4
        --learning_rate {learning_rate}
        --warmup_steps 8
        --logging_steps 0
        --logging_strategy no
        --save_steps {str(UpperCamelCase )}
        --group_by_length
        --label_smoothing_factor 0.1
        --target_lang ro_RO
        --source_lang en_XX
    '''.split()
    lowerCamelCase_ = f'''
        --do_eval
        --per_device_eval_batch_size 4
        --max_eval_samples 8
        --val_max_target_length 73,348
        --evaluation_strategy steps
        --eval_steps {str(UpperCamelCase )}
    '''.split()
    lowerCamelCase_ = "\n        --do_predict\n    ".split()
    lowerCamelCase_ = []
    # Compose the final argv from the requested phases and extras.
    if do_train:
        args += args_train
    if do_eval:
        args += args_eval
    if do_predict:
        args += args_predict
    if predict_with_generate:
        args += "--predict_with_generate".split()
    if do_train:
        if optim == "adafactor":
            args += "--adafactor".split()
        else:
            args += f'''--optim {optim}'''.split()
    if extra_args_str is not None:
        args += extra_args_str.split()
    if distributed:
        # Launch the example script under torch.distributed.run on a unique port.
        if n_gpus_to_use is None:
            lowerCamelCase_ = get_gpu_count()
        lowerCamelCase_ = get_torch_dist_unique_port()
        lowerCamelCase_ = f'''
            -m torch.distributed.run
            --nproc_per_node={n_gpus_to_use}
            --master_port={master_port}
            {self.examples_dir_str}/pytorch/translation/run_translation.py
        '''.split()
        lowerCamelCase_ = [sys.executable] + distributed_args + args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
        execute_subprocess_async(UpperCamelCase , env=self.get_env() )
    else:
        # Run in-process by faking sys.argv for the example's main().
        lowerCamelCase_ = ["run_translation.py"] + args
        with patch.object(UpperCamelCase , "argv" , UpperCamelCase ):
            main()
    return output_dir
| 55 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import MutableSequence
class a :
    """Dense polynomial over the reals.

    ``coefficients[i]`` holds the coefficient of ``x**i``; the list must contain
    exactly ``degree + 1`` entries.

    Fixes vs the previous revision: the mangled signatures declared duplicate
    parameter names (a SyntaxError), bodies referenced unbound names
    (``degree``, ``polynomial_a``, ``substitution`` ...), and evaluate /
    derivative / integral all shared the single name ``lowerCAmelCase_`` so
    only the integral was reachable.
    """

    def __init__(self, degree: int, coefficients: MutableSequence[float]):
        """Store the degree and a defensive copy of the coefficient list.

        Raises:
            ValueError: if ``len(coefficients) != degree + 1``.
        """
        if len(coefficients) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a: "a") -> "a":
        """Coefficient-wise sum; copies the longer polynomial and folds in the shorter."""
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return a(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return a(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a: "a") -> "a":
        # p - q == p + (-1) * q
        return self + polynomial_a * a(0, [-1])

    def __neg__(self) -> "a":
        return a(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a: "a") -> "a":
        """Convolution of coefficient lists (schoolbook polynomial product)."""
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return a(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        """Evaluate the polynomial at ``substitution`` by a direct power sum."""
        result = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        """Human-readable form like ``3x^2 + 2x + 1``; zero terms are skipped."""
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                # Positive terms get " + " only when something precedes them.
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self) -> "a":
        """Return the first derivative (degree drops by one)."""
        coefficients = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return a(self.degree - 1, coefficients)

    def integral(self, constant=0) -> "a":
        """Return the antiderivative with constant term ``constant``."""
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return a(self.degree + 1, coefficients)

    # Backward-compatible alias: in the previous revision all three of
    # evaluate/derivative/integral shared this name and the last definition
    # (the integral) won, so keep the name resolving to the integral.
    lowerCAmelCase_ = integral

    def __eq__(self, polynomial_a: object) -> bool:
        """Equal iff the other object is a polynomial of identical degree and coefficients."""
        if not isinstance(polynomial_a, a):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a: object) -> bool:
        return not self.__eq__(polynomial_a)
| 30 | """simple docstring"""
def __UpperCAmelCase ( lowercase = 10_00 ):
    """Return the sum of the decimal digits of ``2 ** lowercase`` (Project Euler 16).

    Args:
        lowercase: non-negative exponent (default 1000).
    """
    n = 2**lowercase
    r = 0
    # Peel the digits off in base 10.
    while n:
        r, n = r + n % 10, n // 10
    return r


# Public alias: the __main__ hook below called ``solution`` but only the
# leading-underscore name above was defined, which raised NameError when run.
solution = __UpperCAmelCase

if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 30 | 1 |
def __A(base: int, exponent: int) -> float:
    """Return ``base ** exponent`` for exponent >= 0, computed recursively.

    The previous revision declared two parameters with the same name (a
    SyntaxError) and recursed through the undefined name ``power``.
    """
    return base * __A(base, exponent - 1) if exponent else 1


# Public alias used by the script below (and visible to ``import *``).
power = __A

if __name__ == "__main__":
    print('''Raise base to the power of exponent using recursion...''')
    base = int(input('''Enter the base: ''').strip())
    exponent = int(input('''Enter the exponent: ''').strip())
    result = power(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F'''{base} to the power of {exponent} is {result}''')
| 39 | """simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
    """Helper that builds ViT configs and dummy inputs for the model tests below.

    NOTE(review): throughout this class the left-hand sides of assignments were
    mangled to ``lowercase_`` while later lines read the original variable
    names (``result``, ``config_and_inputs``, ``pixel_values`` ...), the
    ``__init__`` signature repeats one parameter name (a SyntaxError), and the
    annotated tuple-unpacking in the last method is also a SyntaxError.
    Restore the original bindings before running.
    """

    def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=30 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=None ,__UpperCamelCase=2 ,) -> List[Any]:
        """Record the test hyper-parameters and derive the patch sequence length."""
        lowercase_ : Tuple = parent
        lowercase_ : List[Any] = batch_size
        lowercase_ : Optional[int] = image_size
        lowercase_ : List[str] = patch_size
        lowercase_ : int = num_channels
        lowercase_ : List[str] = is_training
        lowercase_ : Union[str, Any] = use_labels
        lowercase_ : str = hidden_size
        lowercase_ : List[str] = num_hidden_layers
        lowercase_ : List[str] = num_attention_heads
        lowercase_ : Dict = intermediate_size
        lowercase_ : Optional[int] = hidden_act
        lowercase_ : Any = hidden_dropout_prob
        lowercase_ : Any = attention_probs_dropout_prob
        lowercase_ : List[str] = type_sequence_label_size
        lowercase_ : Union[str, Any] = initializer_range
        lowercase_ : int = scope
        lowercase_ : Dict = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        lowercase_ : Any = (image_size // patch_size) ** 2
        lowercase_ : Optional[int] = num_patches + 1

    def _UpperCAmelCase ( self ) -> List[Any]:
        """Build random pixel values (and labels when requested) plus a config."""
        lowercase_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowercase_ : Tuple = None
        if self.use_labels:
            lowercase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
        lowercase_ : Any = self.get_config()
        return config, pixel_values, labels

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """Return a ViTConfig assembled from the recorded hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
        """Forward ViTModel and check the last_hidden_state shape."""
        lowercase_ : List[Any] = ViTModel(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : Optional[Any] = model(__UpperCamelCase )
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any:
        """Forward ViTForMaskedImageModeling and check reconstruction shapes (RGB and greyscale)."""
        lowercase_ : Optional[int] = ViTForMaskedImageModeling(config=__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : List[str] = model(__UpperCamelCase )
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowercase_ : Any = 1
        lowercase_ : List[Any] = ViTForMaskedImageModeling(__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase_ : str = model(__UpperCamelCase )
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )

    def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str:
        """Forward ViTForImageClassification and check logits shapes (RGB and greyscale)."""
        lowercase_ : Dict = self.type_sequence_label_size
        lowercase_ : Dict = ViTForImageClassification(__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : int = model(__UpperCamelCase ,labels=__UpperCamelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        lowercase_ : Dict = 1
        lowercase_ : Any = ViTForImageClassification(__UpperCamelCase )
        model.to(__UpperCamelCase )
        model.eval()
        lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowercase_ : Dict = model(__UpperCamelCase )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )

    def _UpperCAmelCase ( self ) -> List[str]:
        """Return (config, inputs_dict) for the common model test mixin."""
        lowercase_ : List[Any] = self.prepare_config_and_inputs()
        (
            (
                lowercase_
            ) , (
                lowercase_
            ) , (
                lowercase_
            ) ,
        ) : Optional[Any] = config_and_inputs
        lowercase_ : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
    """Common model tests for ViT (model / classification / masked-image heads).

    NOTE(review): the base classes are mangled to the unbound name
    ``lowercase_`` (upstream mixes in the model/pipeline tester mixins), every
    class attribute below rebinds the single name ``lowercase`` so only the
    last one survives, and several methods read names (``model``, ``x``,
    ``arg_names``, ``ViTModelTester`` ...) that are never bound here.
    Restore the original names before relying on this class.
    """

    lowercase = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (
        {'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = True
    lowercase = False
    lowercase = False
    lowercase = False

    def _UpperCAmelCase ( self ) -> List[str]:
        """Set up the model tester and the config tester."""
        lowercase_ : Optional[int] = ViTModelTester(self )
        lowercase_ : Dict = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 )

    def _UpperCAmelCase ( self ) -> Optional[Any]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def _UpperCAmelCase ( self ) -> Any:
        pass

    def _UpperCAmelCase ( self ) -> Dict:
        """Input embeddings must be an nn.Module; output embeddings absent or Linear."""
        lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : str = model_class(__UpperCamelCase )
            self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
            lowercase_ : List[Any] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(__UpperCamelCase ,nn.Linear ) )

    def _UpperCAmelCase ( self ) -> Union[str, Any]:
        """forward() must take pixel_values as its first argument."""
        lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase_ : str = model_class(__UpperCamelCase )
            lowercase_ : List[Any] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase_ : Tuple = [*signature.parameters.keys()]
            lowercase_ : Tuple = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> Any:
        lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> str:
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )

    def _UpperCAmelCase ( self ) -> str:
        lowercase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )

    @slow
    def _UpperCAmelCase ( self ) -> Tuple:
        """Loading each pretrained checkpoint must succeed."""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowercase_ : List[Any] = ViTModel.from_pretrained(__UpperCamelCase )
            self.assertIsNotNone(__UpperCamelCase )
def lowercase__( ):
    """Open and return the COCO fixture image used by the ViT integration tests.

    Fix: the previous revision bound the image to a mangled local name and then
    returned the undefined name ``image`` (NameError).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
    """Integration tests running real ViT checkpoints on a fixture image.

    NOTE(review): assignments in this class are mangled to ``lowercase_`` while
    later lines read ``model``/``image_processor``/``image``/``inputs``/
    ``outputs``/``expected_shape``/``expected_slice``; ``prepare_img`` and the
    cached property ``default_image_processor`` also exist in this file only
    under mangled names, and ``torch.floataa`` is not a real dtype (presumably
    a mangled float16 — confirm). Restore the bindings before running.
    """

    @cached_property
    def _UpperCAmelCase ( self ) -> Any:
        """Default image processor, or None when vision deps are unavailable."""
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None

    @slow
    def _UpperCAmelCase ( self ) -> int:
        """Classification head smoke test on google/vit-base-patch16-224."""
        lowercase_ : str = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(__UpperCamelCase )
        lowercase_ : Any = self.default_image_processor
        lowercase_ : Dict = prepare_img()
        lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase )
        # forward pass
        with torch.no_grad():
            lowercase_ : Any = model(**__UpperCamelCase )
        # verify the logits
        lowercase_ : Optional[Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,__UpperCamelCase )
        lowercase_ : Optional[int] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )

    @slow
    def _UpperCAmelCase ( self ) -> Tuple:
        """Interpolated position-encoding test on facebook/dino-vits8 at 480px."""
        lowercase_ : str = ViTModel.from_pretrained('facebook/dino-vits8' ).to(__UpperCamelCase )
        lowercase_ : int = ViTImageProcessor.from_pretrained('facebook/dino-vits8' ,size=480 )
        lowercase_ : int = prepare_img()
        lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
        lowercase_ : int = inputs.pixel_values.to(__UpperCamelCase )
        # forward pass
        with torch.no_grad():
            lowercase_ : int = model(__UpperCamelCase ,interpolate_pos_encoding=__UpperCamelCase )
        # verify the logits
        lowercase_ : Any = torch.Size((1, 3601, 384) )
        self.assertEqual(outputs.last_hidden_state.shape ,__UpperCamelCase )
        lowercase_ : Any = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )

    @slow
    @require_accelerate
    @require_torch_gpu
    def _UpperCAmelCase ( self ) -> int:
        """Half-precision inference must run without error under accelerate."""
        lowercase_ : Optional[int] = ViTModel.from_pretrained('facebook/dino-vits8' ,torch_dtype=torch.floataa ,device_map='auto' )
        lowercase_ : int = self.default_image_processor
        lowercase_ : Optional[int] = prepare_img()
        lowercase_ : Tuple = image_processor(images=__UpperCamelCase ,return_tensors='pt' )
        lowercase_ : Union[str, Any] = inputs.pixel_values.to(__UpperCamelCase )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            lowercase_ : Dict = model(__UpperCamelCase )
| 213 | 0 |
"""simple docstring"""
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
    """Return the sum of the decimal digits of the non-negative integer argument.

    Fix: the previous revision bound the accumulator to a mangled name and
    looped over the unbound name ``num``.
    """
    digit_sum = 0
    num = SCREAMING_SNAKE_CASE_
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


# Public alias: the convergent function below calls ``sum_digits``, and a
# leading-underscore name is also invisible to ``import *``.
sum_digits = __lowerCAmelCase
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ = 100 ):
    """Digit sum of the numerator of the N-th convergent of the continued
    fraction of e (Project Euler 65), with N given by the argument.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, SCREAMING_SNAKE_CASE_ + 1):
        temp = pre_numerator
        # e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third term is 2k/3.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Digit sum inlined: the helper above is only reachable under a mangled
    # leading-underscore name, so do not depend on it here.
    return sum(int(digit) for digit in str(cur_numerator))


# Public alias: the print below (and ``import *``) need a non-underscored name.
solution = __lowerCAmelCase

if __name__ == "__main__":
    print(F'{solution() = }')
| 366 |
from pathlib import Path
import fire
def __lowerCAmelCase ( src_dir , dest_dir , n ):
    """Write the first ``n`` (rstripped) lines of every file in ``src_dir``
    into a same-named file under ``dest_dir`` (created if missing).

    Fixes: the previous revision declared three parameters with one shared
    name (a SyntaxError), and the fire CLI hook below called the undefined
    name ``minify``.
    """
    src_dir = Path(src_dir )
    dest_dir = Path(dest_dir )
    dest_dir.mkdir(exist_ok=True )
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines() )][:n]
        dest_path = dest_dir.joinpath(path.name )
        print(dest_path )
        dest_path.open("w" ).write("\n".join(new ) )


# Public alias used by the CLI entry point below.
minify = __lowerCAmelCase

if __name__ == "__main__":
    fire.Fire(minify)
| 224 | 0 |
import os
def SCREAMING_SNAKE_CASE_ ( path = None ) -> int:
    """Maximum top-to-bottom path sum of the triangle in ``path``
    (Project Euler 18/67 style).

    Args:
        path: optional file of whitespace-separated integer rows (one row per
            line); defaults to ``triangle.txt`` next to this script.
            (New optional parameter — calling with no arguments behaves as
            before; the old ``-> Dict`` annotation also raised NameError at
            import time, and the body read several unbound mangled names.)
    """
    if path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(script_dir, 'triangle.txt')
    with open(path) as f:
        triangle = f.readlines()
    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(' ' ):
            numbers_from_line.append(int(number) )
        a.append(numbers_from_line )
    # Fold each row into the next: every cell gains its larger reachable parent.
    for i in range(1, len(a) ):
        for j in range(len(a[i]) ):
            number_a = a[i - 1][j] if j != len(a[i - 1]) else 0
            number_b = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number_a, number_b)
    return max(a[-1])


# Public alias: the __main__ hook below referred to the undefined ``solution``.
solution = SCREAMING_SNAKE_CASE_

if __name__ == "__main__":
    print(solution())
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ..utils import _LazyModule
# Map of submodule -> public names, consumed lazily by ``_LazyModule``.
_A = {
    """config""": [
        """EXTERNAL_DATA_FORMAT_SIZE_LIMIT""",
        """OnnxConfig""",
        """OnnxConfigWithPast""",
        """OnnxSeq2SeqConfigWithPast""",
        """PatchingSpec""",
    ],
    """convert""": ["""export""", """validate_model_outputs"""],
    """features""": ["""FeaturesManager"""],
    """utils""": ["""ParameterFormat""", """compute_serialized_parameters_size"""],
}

# Explicit name for the structure: the lazy-module call below referenced
# ``_import_structure`` which was never defined under that name (NameError).
_import_structure = _A

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeqaSeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    # Install the lazy module in sys.modules; merely binding it to a module
    # variable (as the previous revision did) never activates lazy importing.
    _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = _A
| 171 | 0 |
"""simple docstring"""
from math import factorial, pi
def UpperCamelCase_ ( theta: float , accuracy: int = 30 ):
    """Approximate sin(theta) with ``accuracy`` terms of the Maclaurin series.

    Fixes: the previous revision declared both parameters under one name (a
    SyntaxError) and read the unbound names ``theta``/``div``/``accuracy``.

    Raises:
        ValueError: if theta is not a number, or accuracy is not a positive int.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_sin() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_sin() requires a positive int for accuracy' )
    theta = float(theta )
    # Reduce theta by whole periods of 2*pi so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(accuracy ) )


# Public name used by the doctest/demo block at the bottom of the file.
maclaurin_sin = UpperCamelCase_
def UpperCamelCase_ ( theta: float , accuracy: int = 30 ):
    """Approximate cos(theta) with ``accuracy`` terms of the Maclaurin series.

    Fixes: duplicate parameter names (SyntaxError) and unbound
    ``theta``/``div``/``accuracy`` references, as in the sine twin above.

    Raises:
        ValueError: if theta is not a number, or accuracy is not a positive int.
    """
    if not isinstance(theta , (int, float) ):
        raise ValueError('maclaurin_cos() requires either an int or float for theta' )
    if not isinstance(accuracy , int ) or accuracy <= 0:
        raise ValueError('maclaurin_cos() requires a positive int for accuracy' )
    theta = float(theta )
    # Reduce theta by whole periods of 2*pi so the series converges quickly.
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(accuracy ) )


# Public name used by the doctest/demo block at the bottom of the file.
maclaurin_cos = UpperCamelCase_
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # NOTE(review): these demo calls use the public names maclaurin_sin /
    # maclaurin_cos; as written the two series functions above share the
    # mangled name ``UpperCamelCase_`` and neither public name is bound, so
    # running this script raises NameError — restore the bindings.
    print(maclaurin_sin(1_0))
    print(maclaurin_sin(-1_0))
    print(maclaurin_sin(1_0, 1_5))
    print(maclaurin_sin(-1_0, 1_5))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(1_0, 1_5))
    print(maclaurin_cos(-1_0, 1_5))
| 371 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ : Any = TypeVar("""_T""")
class UpperCamelCase__ ( Generic[_T] ):
"""simple docstring"""
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : Iterable[_T] | None = None ):
lowerCAmelCase_ : list[_T] = list(iterable or [] )
lowerCAmelCase_ : list[_T] = []
def __len__( self : List[str] ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : int ):
return F"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : _T ):
self._stacka.append(SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : List[str] = self._stacka.pop
lowerCAmelCase_ : int = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 289 | 0 |
import torch
def lowerCAmelCase_ ( ):
    """Print how many CUDA GPUs this process can see (0 when CUDA is unavailable).

    Fixes: the previous revision stored the count under a mangled name while
    the f-string read ``num_gpus``, and the __main__ hook called the undefined
    name ``main``.
    """
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f'''Successfully ran on {num_gpus} GPUs''' )


# Public alias used by the entry-point hook below.
main = lowerCAmelCase_

if __name__ == "__main__":
    main()
| 26 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
    """Integration test for TFCamembert on a canned French sentence.

    Fixes vs the previous revision: the model input was read from the unbound
    name ``_a`` (NameError), later lines read ``output``/``expected_shape``/
    ``expected_slice`` that were never bound, and ``tf.intaa``/``tf.floataa``
    are not real TensorFlow dtypes (restored to int32/float32 — confirm against
    the upstream test).
    """

    @slow
    def a__ ( self ) -> Any:
        """Check the output shape and a 3x3 slice of the last hidden state."""
        model = TFCamembertModel.from_pretrained("""jplu/tf-camembert-base""" )
        # "J'aime le camembert !" pre-tokenized to input ids.
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 2_5543, 110, 83, 6]] , dtype=tf.int32 , )
        output = model(input_ids )["""last_hidden_state"""]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 26 | 1 |
"""simple docstring"""
from itertools import count
def UpperCAmelCase ( a_ = 50 ):
    '''Project Euler 115: smallest row length n for which the number of ways to
    fill a row with red blocks of length >= ``a_`` (separated by at least one
    black square) first exceeds one million.

    Fixes: the previous revision read the unbound names ``min_block_length``
    and ``fill_count_functions`` (the assignments were mangled), and the
    __main__ hook below called the undefined name ``solution``.
    '''
    min_block_length = a_
    # fill_count_functions[k] = number of fill patterns for a row of length k.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length ):
        fill_count_functions.append(1 )
        for block_length in range(min_block_length , n + 1 ):
            for block_start in range(n - block_length ):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n


# Public alias used by the print below (and by ``import *``).
solution = UpperCAmelCase

if __name__ == "__main__":
    print(F"""{solution() = }""")
| 205 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# The previous revision bound both the logger and the archive map to ``_A``,
# so the logger object was immediately clobbered by the dict below.
logger = logging.get_logger(__name__)

_A = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class _lowercase ( PretrainedConfig ):
    """Configuration class for SEW-D models (stores architecture hyper-parameters).

    Fixes vs the previous revision: the base class was the undefined name
    ``__UpperCAmelCase`` (it must be ``PretrainedConfig``, imported above),
    every ``__init__`` parameter shared the single name ``UpperCAmelCase_``
    (a SyntaxError), and the body read the original parameter names that were
    never bound. Parameter names/defaults restored per the upstream SEWDConfig.
    """

    lowercase_ = 'sew-d'  # model_type key; original attribute name kept

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-7,
        feature_layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        # The three convolutional spec lists must describe the same stack.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect.'
                'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
                F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
                F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def _UpperCamelCase ( self ) -> Optional[Any]:
        """Total stride of the feature extractor (product of all conv strides)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
"""simple docstring"""
_lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
} | 74 |
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def a( A : Tuple ) -> Optional[Any]:
    """Build the (encoder, decoder) configs for the HF VisionEncoderDecoder
    port of an original ``DonutModel``.

    Fixes: the mangled body read the undefined names ``model``,
    ``encoder_config`` and ``decoder_config`` and passed the model itself
    as every boolean flag; bindings and flag values are restored from the
    upstream Donut conversion script (verify against it).

    Args:
        A: the original ``DonutModel`` whose configuration is mirrored.

    Returns:
        Tuple ``(DonutSwinConfig, MBartConfig)``.
    """
    model = A
    original_config = model.config
    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 16, 32],
        window_size=original_config.window_size,
        embed_dim=128,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        # the decoder vocab must cover the original tokenizer exactly
        vocab_size=len(model.decoder.tokenizer),
        scale_embedding=True,
        add_final_layer_norm=True,
    )
    return encoder_config, decoder_config
def a( A : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
if "encoder.model" in name:
a = name.replace("encoder.model" , "encoder" )
if "decoder.model" in name:
a = name.replace("decoder.model" , "decoder" )
if "patch_embed.proj" in name:
a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
a = name.replace("patch_embed.norm" , "embeddings.norm" )
if name.startswith("encoder" ):
if "layers" in name:
a = "encoder." + name
if "attn.proj" in name:
a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "mask" not in name:
a = name.replace("attn" , "attention.self" )
if "norm1" in name:
a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
a = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
a = "encoder.layernorm.weight"
if name == "encoder.norm.bias":
a = "encoder.layernorm.bias"
return name
def a( A : Union[str, Any] , A : Tuple ) -> List[Any]:
    # NOTE(review): duplicate parameter name `A` — this `def` is a
    # SyntaxError as written (upstream the parameters were
    # (orig_state_dict, model)).  The body also reads names that are never
    # bound here (`orig_state_dict`, `key_split`, `model`, `val`, `dim`,
    # `layer_num`, `block_num`) and discards every rewritten tensor into
    # the throwaway local `a`.  Kept byte-identical pending reconstruction
    # from the upstream Donut conversion script.
    """Rewrite an original Donut state dict for the HF port: fused qkv
    projections are split into query/key/value slices, attention-mask
    buffers and the encoder's final LayerNorm are dropped, and everything
    else is carried over (upstream: under its renamed key)."""
    for key in orig_state_dict.copy().keys():
        a = orig_state_dict.pop(A )
        if "qkv" in key:
            # presumably key looks like "...layers.<L>...blocks.<B>...qkv..."
            # and the fused projection is split at the attention head size —
            # TODO confirm against the original checkpoint layout.
            a = key.split("." )
            a = int(key_split[3] )
            a = int(key_split[5] )
            a = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            if "weight" in key:
                a = val[:dim, :]
                a = val[dim : dim * 2, :]
                a = val[-dim:, :]
            else:
                a = val[:dim]
                a = val[dim : dim * 2]
                a = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            a = val
    return orig_state_dict
def a( A : List[Any] , A : Tuple=None , A : List[Any]=False ) -> Optional[int]:
    # NOTE(review): duplicate parameter name `A` — SyntaxError as written
    # (upstream: (model_name, pytorch_dump_folder_path, push_to_hub)).
    # Nearly every local here was mangled to `a`, so later references such
    # as `model`, `original_model`, `dataset`, `processor`, `model_name`,
    # `task_prompt`, `pytorch_dump_folder_path`, `push_to_hub` are unbound.
    # Code kept byte-identical pending reconstruction.
    """Convert an original Donut checkpoint to a HF VisionEncoderDecoderModel:
    load the original model, port config and state dict, verify the patch
    embeddings / encoder / decoder outputs against the original on a sample
    document, then optionally save and push to the hub."""
    a = DonutModel.from_pretrained(A ).eval()
    # load HuggingFace model
    a , a = get_configs(A )
    a = DonutSwinModel(A )
    a = MBartForCausalLM(A )
    a = VisionEncoderDecoderModel(encoder=A , decoder=A )
    model.eval()
    a = original_model.state_dict()
    a = convert_state_dict(A , A )
    model.load_state_dict(A )
    # verify results on scanned document
    a = load_dataset("hf-internal-testing/example-documents" )
    a = dataset["test"][0]["image"].convert("RGB" )
    a = XLMRobertaTokenizerFast.from_pretrained(A , from_slow=A )
    a = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
    a = DonutProcessor(A , A )
    a = processor(A , return_tensors="pt" ).pixel_values
    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        a = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        a = "When is the coffee break?"
        a = task_prompt.replace("{user_input}" , A )
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        a = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        a = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        # NOTE(review): missing leading "<" — matches the upstream script's
        # literal, so left as-is; verify before relying on this prompt.
        a = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        a = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        a = "hello world"
    else:
        raise ValueError("Model name not supported" )
    a = original_model.decoder.tokenizer(A , add_special_tokens=A , return_tensors="pt" )[
        "input_ids"
    ]
    a = original_model.encoder.model.patch_embed(A )
    a , a = model.encoder.embeddings(A )
    assert torch.allclose(A , A , atol=1e-3 )
    # verify encoder hidden states
    a = original_model.encoder(A )
    a = model.encoder(A ).last_hidden_state
    assert torch.allclose(A , A , atol=1e-2 )
    # verify decoder hidden states
    a = original_model(A , A , A ).logits
    a = model(A , decoder_input_ids=A ).logits
    assert torch.allclose(A , A , atol=1e-3 )
    print("Looks ok!" )
    if pytorch_dump_folder_path is not None:
        print(f'''Saving model and processor to {pytorch_dump_folder_path}''' )
        model.save_pretrained(A )
        processor.save_pretrained(A )
    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
        processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" )
if __name__ == "__main__":
_lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="naver-clova-ix/donut-base-finetuned-docvqa",
required=False,
type=str,
help="Name of the original model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
required=False,
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub.",
)
_lowercase: Optional[Any] = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __SCREAMING_SNAKE_CASE :
    '''Builds a tiny BioGPT configuration plus random inputs and provides
    ``create_and_check_*`` helpers shared by the model test suite below.

    NOTE(review): this file is machine-renamed.  Every parameter is called
    ``snake_case_`` (duplicate argument names make each multi-parameter
    ``def`` a SyntaxError) and every attribute/local assignment targets
    the single name ``UpperCAmelCase_``, so the original bindings
    (``self.batch_size``, ``config``, ``input_ids``, ...) are lost.  Code
    kept byte-identical; restore from the upstream BioGPT test suite.'''
    def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=False , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=3 , snake_case_=4 , snake_case_=None , ):
        '''Record the tiny model hyper-parameters used by every test.'''
        UpperCAmelCase_ : List[str] = parent
        UpperCAmelCase_ : int = batch_size
        UpperCAmelCase_ : str = seq_length
        UpperCAmelCase_ : Tuple = is_training
        UpperCAmelCase_ : Dict = use_input_mask
        UpperCAmelCase_ : Optional[Any] = use_token_type_ids
        UpperCAmelCase_ : Optional[Any] = use_labels
        UpperCAmelCase_ : Tuple = vocab_size
        UpperCAmelCase_ : Union[str, Any] = hidden_size
        UpperCAmelCase_ : Dict = num_hidden_layers
        UpperCAmelCase_ : Optional[int] = num_attention_heads
        UpperCAmelCase_ : Optional[Any] = intermediate_size
        UpperCAmelCase_ : Any = hidden_act
        UpperCAmelCase_ : Optional[Any] = hidden_dropout_prob
        UpperCAmelCase_ : Dict = attention_probs_dropout_prob
        UpperCAmelCase_ : Union[str, Any] = max_position_embeddings
        UpperCAmelCase_ : Dict = type_vocab_size
        UpperCAmelCase_ : str = type_sequence_label_size
        UpperCAmelCase_ : int = initializer_range
        UpperCAmelCase_ : Tuple = num_labels
        UpperCAmelCase_ : int = num_choices
        UpperCAmelCase_ : str = scope
    def _UpperCamelCase ( self ):
        '''Create a config plus random ids/masks/labels for one forward pass.'''
        UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCAmelCase_ : Union[str, Any] = None
        if self.use_input_mask:
            UpperCAmelCase_ : str = random_attention_mask([self.batch_size, self.seq_length] )
        UpperCAmelCase_ : str = None
        if self.use_token_type_ids:
            UpperCAmelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        UpperCAmelCase_ : Union[str, Any] = None
        UpperCAmelCase_ : Dict = None
        UpperCAmelCase_ : List[Any] = None
        if self.use_labels:
            UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            UpperCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.num_choices )
        UpperCAmelCase_ : List[Any] = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def _UpperCamelCase ( self ):
        '''Return a small BioGptConfig built from the recorded sizes.'''
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
        '''Sanity-check the base BioGptModel output shape.'''
        UpperCAmelCase_ : List[Any] = BioGptModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        UpperCAmelCase_ : Any = model(snake_case_ , attention_mask=snake_case_ )
        UpperCAmelCase_ : Tuple = model(snake_case_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , ):
        '''Check BioGptForCausalLM logits shape when labels are provided.'''
        UpperCAmelCase_ : int = BioGptForCausalLM(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        UpperCAmelCase_ : Optional[int] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
        '''Check one-step cached decoding matches the uncached forward pass
        when part of the attention mask is zeroed.'''
        UpperCAmelCase_ : Optional[Any] = BioGptModel(config=snake_case_ )
        model.to(snake_case_ )
        model.eval()
        # create attention mask
        UpperCAmelCase_ : int = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case_ )
        UpperCAmelCase_ : str = self.seq_length // 2
        UpperCAmelCase_ : Union[str, Any] = 0
        # first forward pass
        UpperCAmelCase_ : List[str] = model(snake_case_ , attention_mask=snake_case_ ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        UpperCAmelCase_ : List[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        UpperCAmelCase_ : Union[str, Any] = ids_tensor((1,) , snake_case_ ).item() + 1
        UpperCAmelCase_ : str = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        UpperCAmelCase_ : Union[str, Any] = random_other_next_tokens
        # append to next input_ids and attn_mask
        UpperCAmelCase_ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCAmelCase_ : List[str] = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=snake_case_ )] , dim=1 , )
        # get two different outputs
        UpperCAmelCase_ : str = model(snake_case_ , attention_mask=snake_case_ )['last_hidden_state']
        UpperCAmelCase_ : Union[str, Any] = model(snake_case_ , past_key_values=snake_case_ , attention_mask=snake_case_ )['last_hidden_state']
        # select random slice
        UpperCAmelCase_ : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCAmelCase_ : List[str] = output_from_no_past[:, -1, random_slice_idx].detach()
        UpperCAmelCase_ : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
        '''Check multi-token cached decoding matches the uncached pass.'''
        UpperCAmelCase_ : Any = BioGptModel(config=snake_case_ ).to(snake_case_ ).eval()
        UpperCAmelCase_ : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=snake_case_ )
        # first forward pass
        UpperCAmelCase_ : List[Any] = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
        UpperCAmelCase_ : int = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        UpperCAmelCase_ : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
        UpperCAmelCase_ : int = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        UpperCAmelCase_ : List[Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
        UpperCAmelCase_ : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        UpperCAmelCase_ : int = model(snake_case_ , attention_mask=snake_case_ )['last_hidden_state']
        UpperCAmelCase_ : Dict = model(snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ )[
            'last_hidden_state'
        ]
        # select random slice
        UpperCAmelCase_ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        UpperCAmelCase_ : List[str] = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCAmelCase_ : Tuple = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ , snake_case_=False ):
        '''Run forward + backward, optionally with gradient checkpointing.'''
        UpperCAmelCase_ : Any = BioGptForCausalLM(snake_case_ )
        model.to(snake_case_ )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        UpperCAmelCase_ : Union[str, Any] = model(snake_case_ , labels=snake_case_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()
    def _UpperCamelCase ( self , snake_case_ , *snake_case_ ):
        '''Check the depth-scaled initialization of c_proj weights.'''
        UpperCAmelCase_ : str = BioGptModel(snake_case_ )
        UpperCAmelCase_ : Optional[Any] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_01 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
    def _UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , *snake_case_ ):
        '''Check BioGptForTokenClassification logits shape.'''
        UpperCAmelCase_ : Optional[Any] = self.num_labels
        UpperCAmelCase_ : str = BioGptForTokenClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        UpperCAmelCase_ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def _UpperCamelCase ( self ):
        '''Adapter for the common test mixins: return (config, inputs_dict).

        NOTE(review): the multi-name tuple unpack below was collapsed by the
        renamer into a single parenthesised target.'''
        UpperCAmelCase_ : Tuple = self.prepare_config_and_inputs()
        (
            UpperCAmelCase_
        ) : Optional[Any] = config_and_inputs
        UpperCAmelCase_ : str = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
    '''BioGPT model test suite driven by the tester class above.

    NOTE(review): the three `lowerCamelCase_` base classes are undefined
    names at module level (upstream these were ModelTesterMixin,
    GenerationTesterMixin and PipelineTesterMixin), the class attributes
    below all shadow one another under the same mangled name, and
    `BioGptModelTester` / duplicate `snake_case_` parameters elsewhere do
    not resolve.  Code kept byte-identical.'''
    lowerCamelCase_ :str = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    lowerCamelCase_ :Optional[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
    lowerCamelCase_ :List[Any] = (
        {
            '''feature-extraction''': BioGptModel,
            '''text-classification''': BioGptForSequenceClassification,
            '''text-generation''': BioGptForCausalLM,
            '''token-classification''': BioGptForTokenClassification,
            '''zero-shot''': BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowerCamelCase_ :Optional[int] = False
    def _UpperCamelCase ( self ):
        '''Set up the model tester and the common config tester.'''
        UpperCAmelCase_ : Dict = BioGptModelTester(self )
        UpperCAmelCase_ : Any = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
    def _UpperCamelCase ( self ):
        '''Run the shared configuration checks.'''
        self.config_tester.run_common_tests()
    def _UpperCamelCase ( self ):
        '''Forward-pass shape check on the base model.'''
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case_ )
    def _UpperCamelCase ( self ):
        '''Repeat the model check for every position-embedding variant.'''
        UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            UpperCAmelCase_ : List[str] = type
            self.model_tester.create_and_check_model(*snake_case_ )
    def _UpperCamelCase ( self ):
        '''Cached decoding vs. full forward with a modified attention mask.'''
        UpperCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*snake_case_ )
    def _UpperCamelCase ( self ):
        '''Forward/backward with gradient checkpointing enabled.'''
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*snake_case_ , gradient_checkpointing=snake_case_ )
    def _UpperCamelCase ( self ):
        '''Cached decoding with multiple appended tokens.'''
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*snake_case_ )
    def _UpperCamelCase ( self ):
        '''Weight-initialization statistics check.'''
        UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*snake_case_ )
    def _UpperCamelCase ( self ):
        '''Token-classification head shape check.'''
        UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*snake_case_ )
    @slow
    def _UpperCamelCase ( self ):
        '''Batched generation with left padding must match unpadded generation.'''
        UpperCAmelCase_ : Optional[int] = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(snake_case_ )
        UpperCAmelCase_ : Tuple = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        UpperCAmelCase_ : Tuple = 'left'
        # Define PAD Token = EOS Token = 50256
        UpperCAmelCase_ : Any = tokenizer.eos_token
        UpperCAmelCase_ : Union[str, Any] = model.config.eos_token_id
        # use different length sentences to test batching
        UpperCAmelCase_ : Union[str, Any] = [
            'Hello, my dog is a little',
            'Today, I',
        ]
        UpperCAmelCase_ : Optional[int] = tokenizer(snake_case_ , return_tensors='pt' , padding=snake_case_ )
        UpperCAmelCase_ : int = inputs['input_ids'].to(snake_case_ )
        UpperCAmelCase_ : Optional[int] = model.generate(
            input_ids=snake_case_ , attention_mask=inputs['attention_mask'].to(snake_case_ ) , )
        UpperCAmelCase_ : List[Any] = tokenizer(sentences[0] , return_tensors='pt' ).input_ids.to(snake_case_ )
        UpperCAmelCase_ : Tuple = model.generate(input_ids=snake_case_ )
        UpperCAmelCase_ : List[str] = inputs_non_padded.shape[-1] - inputs['attention_mask'][-1].long().sum().cpu().item()
        UpperCAmelCase_ : Dict = tokenizer(sentences[1] , return_tensors='pt' ).input_ids.to(snake_case_ )
        UpperCAmelCase_ : List[str] = model.generate(input_ids=snake_case_ , max_length=model.config.max_length - num_paddings )
        UpperCAmelCase_ : List[Any] = tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
        UpperCAmelCase_ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case_ )
        UpperCAmelCase_ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case_ )
        UpperCAmelCase_ : Union[str, Any] = [
            'Hello, my dog is a little bit bigger than a little bit.',
            'Today, I have a good idea of how to use the information',
        ]
        self.assertListEqual(snake_case_ , snake_case_ )
        self.assertListEqual(snake_case_ , [non_padded_sentence, padded_sentence] )
    @slow
    def _UpperCamelCase ( self ):
        '''Smoke-test loading the published checkpoint(s).'''
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase_ : List[Any] = BioGptModel.from_pretrained(snake_case_ )
            self.assertIsNotNone(snake_case_ )
    def _UpperCamelCase ( self ):
        '''Single-label sequence-classification head shape check.'''
        UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : List[str] = 3
        UpperCAmelCase_ : Optional[Any] = input_dict['input_ids']
        UpperCAmelCase_ : Optional[Any] = input_ids.ne(1 ).to(snake_case_ )
        UpperCAmelCase_ : Optional[int] = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        UpperCAmelCase_ : Any = BioGptForSequenceClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        UpperCAmelCase_ : Optional[int] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
    def _UpperCamelCase ( self ):
        '''Multi-label sequence-classification head shape check.'''
        UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase_ : Dict = 3
        UpperCAmelCase_ : List[str] = 'multi_label_classification'
        UpperCAmelCase_ : Optional[int] = input_dict['input_ids']
        UpperCAmelCase_ : int = input_ids.ne(1 ).to(snake_case_ )
        UpperCAmelCase_ : Optional[Any] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        UpperCAmelCase_ : List[str] = BioGptForSequenceClassification(snake_case_ )
        model.to(snake_case_ )
        model.eval()
        UpperCAmelCase_ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Slow integration tests against the published microsoft/biogpt
    checkpoint.

    NOTE(review): this class re-uses the mangled name of the classes above
    (the earlier definitions are shadowed), and `snake_case_` inside the
    bodies is an undefined name.  Code kept byte-identical.'''
    @slow
    def _UpperCamelCase ( self ):
        '''Compare LM-head logits on a fixed input against recorded values.'''
        UpperCAmelCase_ : Any = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        UpperCAmelCase_ : Optional[Any] = torch.tensor([[2, 4_8_0_5, 9, 6_5_6, 2_1]] )
        UpperCAmelCase_ : Union[str, Any] = model(snake_case_ )[0]
        UpperCAmelCase_ : List[str] = 4_2_3_8_4
        UpperCAmelCase_ : Union[str, Any] = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , snake_case_ )
        UpperCAmelCase_ : List[Any] = torch.tensor(
            [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , snake_case_ , atol=1E-4 ) )
    @slow
    def _UpperCamelCase ( self ):
        '''Beam-search generation on a fixed prompt must reproduce the
        recorded output text.'''
        UpperCAmelCase_ : str = BioGptTokenizer.from_pretrained('microsoft/biogpt' )
        UpperCAmelCase_ : Tuple = BioGptForCausalLM.from_pretrained('microsoft/biogpt' )
        model.to(snake_case_ )
        torch.manual_seed(0 )
        UpperCAmelCase_ : int = tokenizer('COVID-19 is' , return_tensors='pt' ).to(snake_case_ )
        UpperCAmelCase_ : str = model.generate(
            **snake_case_ , min_length=1_0_0 , max_length=1_0_2_4 , num_beams=5 , early_stopping=snake_case_ , )
        UpperCAmelCase_ : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case_ )
        UpperCAmelCase_ : Optional[int] = (
            'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'
            ' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'
            ' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'
            ' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'
            ' more than 800,000 deaths.'
        )
        self.assertEqual(snake_case_ , snake_case_ )
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    '''Unit tests for the backbone utility helpers
    (get_aligned_output_features_output_indices,
    verify_out_features_out_indices, BackboneMixin).

    NOTE(review): machine-renaming left `snake_case_` as an undefined name
    wherever it is passed as a *value* (upstream these positions held
    None / the stage-name list / attribute assignments on `backbone`), so
    these tests cannot run as written.  Code kept byte-identical.'''
    def _UpperCamelCase ( self ):
        '''Alignment of out_features/out_indices: defaults, one side given,
        and negative indices.'''
        UpperCAmelCase_ : int = ['a', 'b', 'c']
        # Defaults to last layer if both are None
        UpperCAmelCase_ , UpperCAmelCase_ : List[str] = get_aligned_output_features_output_indices(snake_case_ , snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , ['c'] )
        self.assertEqual(snake_case_ , [2] )
        # Out indices set to match out features
        UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(['a', 'c'] , snake_case_ , snake_case_ )
        self.assertEqual(snake_case_ , ['a', 'c'] )
        self.assertEqual(snake_case_ , [0, 2] )
        # Out features set to match out indices
        UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = get_aligned_output_features_output_indices(snake_case_ , [0, 2] , snake_case_ )
        self.assertEqual(snake_case_ , ['a', 'c'] )
        self.assertEqual(snake_case_ , [0, 2] )
        # Out features selected from negative indices
        UpperCAmelCase_ , UpperCAmelCase_ : int = get_aligned_output_features_output_indices(snake_case_ , [-3, -1] , snake_case_ )
        self.assertEqual(snake_case_ , ['a', 'c'] )
        self.assertEqual(snake_case_ , [-3, -1] )
    def _UpperCamelCase ( self ):
        '''verify_out_features_out_indices must reject every malformed
        combination and accept a valid one.'''
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , snake_case_ )
        # Out features must be a list
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(('a', 'b') , (0, 1) , ['a', 'b'] )
        # Out features must be a subset of stage names
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 1) , ['a'] )
        # Out indices must be a list or tuple
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(snake_case_ , 0 , ['a', 'b'] )
        # Out indices must be a subset of stage names
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(snake_case_ , (0, 1) , ['a'] )
        # Out features and out indices must be the same length
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(['a', 'b'] , (0,) , ['a', 'b', 'c'] )
        # Out features should match out indices
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(['a', 'b'] , (0, 2) , ['a', 'b', 'c'] )
        # Out features and out indices should be in order
        with self.assertRaises(snake_case_ ):
            verify_out_features_out_indices(['b', 'a'] , (0, 1) , ['a', 'b'] )
        # Check passes with valid inputs
        verify_out_features_out_indices(['a', 'b', 'd'] , (0, 1, -1) , ['a', 'b', 'c', 'd'] )
    def _UpperCamelCase ( self ):
        '''BackboneMixin keeps out_features and out_indices in sync when
        either is reassigned.'''
        UpperCAmelCase_ : List[str] = BackboneMixin()
        UpperCAmelCase_ : Any = ['a', 'b', 'c']
        UpperCAmelCase_ : str = ['a', 'c']
        UpperCAmelCase_ : str = [0, 2]
        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [0, 2] )
        # Check out features and indices are updated correctly
        UpperCAmelCase_ : str = ['a', 'b']
        self.assertEqual(backbone.out_features , ['a', 'b'] )
        self.assertEqual(backbone.out_indices , [0, 1] )
        UpperCAmelCase_ : Optional[int] = [-3, -1]
        self.assertEqual(backbone.out_features , ['a', 'c'] )
        self.assertEqual(backbone.out_indices , [-3, -1] )
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( _a, unittest.TestCase ):
lowerCamelCase_ : str = RoCBertTokenizer
lowerCamelCase_ : int = None
lowerCamelCase_ : Dict = False
lowerCamelCase_ : Optional[Any] = True
lowerCamelCase_ : Optional[Any] = filter_non_english
def lowerCamelCase (self ) -> Optional[int]:
'''simple docstring'''
super().setUp()
snake_case_ : Optional[Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
snake_case_ : Optional[Any] = {}
snake_case_ : int = {}
for i, value in enumerate(__magic_name__ ):
snake_case_ : List[str] = i
snake_case_ : Dict = i
snake_case_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
snake_case_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(__magic_name__ , __magic_name__ , ensure_ascii=__magic_name__ )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(__magic_name__ , __magic_name__ , ensure_ascii=__magic_name__ )
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : str = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
snake_case_ : List[str] = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(__magic_name__ , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(__magic_name__ ) , [5, 6, 2, 5, 7, 8] )
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowerCamelCase (self ) -> List[str]:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Dict:
    '''Lower-casing with the default accent handling: expected output matches
    the strip-accents behaviour ("hallo", "hello").

    NOTE(review): ``do_lower_case=__magic_name__`` is undefined (presumably
    ``True``) and ``tokenizer`` is undefined; NameError as written.
    '''
    snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
    self.assertListEqual(
        tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowerCamelCase (self ) -> Union[str, Any]:
    '''Case-preserving mode: original casing ("HeLLo", "Are", "yoU") survives
    tokenization; only whitespace splitting and punctuation isolation apply.

    NOTE(review): ``do_lower_case=__magic_name__`` is undefined (presumably
    ``False``) and ``tokenizer`` is undefined; NameError as written.
    '''
    snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ )
    self.assertListEqual(
        tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Optional[int]:
    '''Case-preserving, accents kept: "HäLLo" stays "HäLLo".

    NOTE(review): both kwargs receive the undefined ``__magic_name__``
    (presumably ``do_lower_case=False, strip_accents=False``), and
    ``tokenizer`` is undefined; NameError as written.
    '''
    snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
    self.assertListEqual(
        tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Dict:
    '''Case-preserving with accent stripping: "HäLLo" -> "HaLLo".

    NOTE(review): both kwargs receive the undefined ``__magic_name__``
    (presumably ``do_lower_case=False, strip_accents=True``), and
    ``tokenizer`` is undefined; NameError as written.
    '''
    snake_case_ : Optional[int] = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , strip_accents=__magic_name__ )
    self.assertListEqual(
        tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowerCamelCase (self ) -> Optional[int]:
    '''``never_split`` protection: the "[UNK]" marker must come through as a
    single unsplit token.

    NOTE(review): ``do_lower_case=__magic_name__`` is undefined (presumably
    ``False``, given the preserved casing) and ``tokenizer`` is undefined;
    NameError as written.
    '''
    snake_case_ : Dict = RoCBertBasicTokenizer(do_lower_case=__magic_name__ , never_split=['''[UNK]'''] )
    self.assertListEqual(
        tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowerCamelCase (self ) -> Optional[int]:
    '''Wordpiece tokenizer over a toy vocab: greedy longest-match-first
    splitting ("unwanted" -> un ##want ##ed); a piece with no vocab match
    maps the whole word to [UNK]; empty input yields an empty list.

    NOTE(review): ``__magic_name__`` (the vocab token list / dict) and
    ``tokenizer`` are undefined — mangled locals; the enumerate loop was
    presumably building ``vocab[token] = i``. NameError as written.
    '''
    snake_case_ : Tuple = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
    snake_case_ : Union[str, Any] = {}
    for i, token in enumerate(__magic_name__ ):
        snake_case_ : List[str] = i
    snake_case_ : List[str] = RoCBertWordpieceTokenizer(vocab=__magic_name__ , unk_token='''[UNK]''' )
    self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
    self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
    self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def lowerCamelCase (self ) -> Union[str, Any]:
    '''Whitespace classification: space, tab, CR, LF and NBSP count as
    whitespace; a letter and a hyphen do not.'''
    for whitespace_char in (''' ''', '''\t''', '''\r''', '''\n''', '''\u00A0'''):
        self.assertTrue(_is_whitespace(whitespace_char ) )
    for other_char in ('''A''', '''-'''):
        self.assertFalse(_is_whitespace(other_char ) )
def lowerCamelCase (self ) -> Any:
    '''Control-character classification: U+0005 is a control character;
    letters, spaces, tabs and carriage returns are not.'''
    self.assertTrue(_is_control('''\u0005''' ) )
    for non_control in ('''A''', ''' ''', '''\t''', '''\r'''):
        self.assertFalse(_is_control(non_control ) )
def lowerCamelCase (self ) -> List[str]:
    '''Punctuation classification: '-', '$', '`' and '.' are punctuation;
    a letter and a space are not.'''
    for punct in ('''-''', '''$''', '''`''', '''.'''):
        self.assertTrue(_is_punctuation(punct ) )
    for non_punct in ('''A''', ''' '''):
        self.assertFalse(_is_punctuation(non_punct ) )
def lowerCamelCase (self ) -> int:
    '''Soft-hyphen (U+00AD) handling from huggingface/tokenizers#340: the
    character must be cleaned away, leaving an empty token list, for both the
    slow and (if available) the fast tokenizer.

    NOTE(review): ``tokenizer`` / ``rust_tokenizer`` and the comprehension
    argument ``__magic_name__`` (should be the loop variable ``t``) are
    undefined — mangled locals; NameError as written.
    '''
    snake_case_ : List[Any] = self.get_tokenizer()
    # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
    self.assertListEqual([tokenizer.tokenize(__magic_name__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
    if self.test_rust_tokenizer:
        snake_case_ : Dict = self.get_rust_tokenizer()
        self.assertListEqual(
            [rust_tokenizer.tokenize(__magic_name__ ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def lowerCamelCase (self ) -> int:
    '''Offset mapping of the fast tokenizer must line up with the produced
    tokens for a sentence containing an accent and a mask token, with one
    expected table for cased checkpoints and one for lower-cased ones.

    NOTE(review): the ``from_pretrained``/``encode_plus`` arguments are the
    undefined ``__magic_name__`` (should be ``pretrained_name``/``kwargs``/
    the sentence and boolean flags), and ``tokens`` is never bound (the
    ``encode_plus`` result went to the mangled local); NameError as written.
    '''
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            snake_case_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
            snake_case_ : Optional[Any] = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
            snake_case_ : List[str] = tokenizer_r.encode_plus(
                __magic_name__ , return_attention_mask=__magic_name__ , return_token_type_ids=__magic_name__ , return_offsets_mapping=__magic_name__ , add_special_tokens=__magic_name__ , )
            snake_case_ : Any = tokenizer_r.do_lower_case if hasattr(__magic_name__ , '''do_lower_case''' ) else False
            # expected (offset, token) pairs; the first table is for a cased
            # model, the second for a lower-cased one
            snake_case_ : Optional[int] = (
                [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), '''A'''),
                    ((1, 2), ''','''),
                    ((3, 5), '''na'''),
                    ((5, 6), '''##ï'''),
                    ((6, 8), '''##ve'''),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), '''Allen'''),
                    ((21, 23), '''##NL'''),
                    ((23, 24), '''##P'''),
                    ((25, 33), '''sentence'''),
                    ((33, 34), '''.'''),
                    ((0, 0), tokenizer_r.sep_token),
                ]
                if not do_lower_case
                else [
                    ((0, 0), tokenizer_r.cls_token),
                    ((0, 1), '''a'''),
                    ((1, 2), ''','''),
                    ((3, 8), '''naive'''),
                    ((9, 15), tokenizer_r.mask_token),
                    ((16, 21), '''allen'''),
                    ((21, 23), '''##nl'''),
                    ((23, 24), '''##p'''),
                    ((25, 33), '''sentence'''),
                    ((33, 34), '''.'''),
                    ((0, 0), tokenizer_r.sep_token),
                ]
            )
            self.assertEqual(
                [e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
            self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def lowerCamelCase (self ) -> Any:
    '''tokenize_chinese_chars behaviour must agree between the slow and fast
    tokenizers: when enabled no Chinese character gets a "##" prefix; when
    disabled every character after the first does.

    NOTE(review): almost every argument below is the undefined
    ``__magic_name__`` (pretrained name, kwargs, the joined sentence, the
    boolean flags and the encode results) — mangled locals; NameError as
    written.
    '''
    snake_case_ : Optional[Any] = ['''的''', '''人''', '''有''']
    snake_case_ : Union[str, Any] = ''''''.join(__magic_name__ )
    for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
        with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
            snake_case_ : Dict = True
            snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
            snake_case_ : str = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
            snake_case_ : str = tokenizer_p.encode(__magic_name__ , add_special_tokens=__magic_name__ )
            snake_case_ : int = tokenizer_r.encode(__magic_name__ , add_special_tokens=__magic_name__ )
            snake_case_ : Optional[int] = tokenizer_r.convert_ids_to_tokens(__magic_name__ )
            snake_case_ : Optional[int] = tokenizer_p.convert_ids_to_tokens(__magic_name__ )
            # it is expected that each Chinese character is not preceded by "##"
            self.assertListEqual(__magic_name__ , __magic_name__ )
            self.assertListEqual(__magic_name__ , __magic_name__ )
            snake_case_ : Dict = False
            snake_case_ : int = self.rust_tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
            snake_case_ : Tuple = self.tokenizer_class.from_pretrained(__magic_name__ , **__magic_name__ )
            snake_case_ : Any = tokenizer_r.encode(__magic_name__ , add_special_tokens=__magic_name__ )
            snake_case_ : List[Any] = tokenizer_p.encode(__magic_name__ , add_special_tokens=__magic_name__ )
            snake_case_ : Optional[Any] = tokenizer_r.convert_ids_to_tokens(__magic_name__ )
            snake_case_ : Any = tokenizer_p.convert_ids_to_tokens(__magic_name__ )
            # it is expected that only the first Chinese character is not preceded by "##".
            snake_case_ : Any = [
                F'''##{token}''' if idx != 0 else token for idx, token in enumerate(__magic_name__ )
            ]
            self.assertListEqual(__magic_name__ , __magic_name__ )
            self.assertListEqual(__magic_name__ , __magic_name__ )
@slow
def lowerCamelCase (self ) -> Union[str, Any]:
    '''build_inputs_with_special_tokens wraps a single sequence as
    [CLS] x [SEP] and a pair as [CLS] x [SEP] y [SEP] (CLS id 1, SEP id 2).

    NOTE(review): ``tokenizer``, ``text``/``text_a`` and the
    ``encoded_sentence``/``encoded_pair`` names are undefined — they are the
    mangled locals assigned above; NameError as written.
    '''
    snake_case_ : Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
    snake_case_ : str = tokenizer.encode('''你好''' , add_special_tokens=__magic_name__ )
    snake_case_ : int = tokenizer.encode('''你是谁''' , add_special_tokens=__magic_name__ )
    snake_case_ : Dict = tokenizer.build_inputs_with_special_tokens(__magic_name__ )
    snake_case_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(__magic_name__ , __magic_name__ )
    assert encoded_sentence == [1] + text + [2]
    assert encoded_pair == [1] + text + [2] + text_a + [2]
def lowerCamelCase (self ) -> Dict:
    '''prepare_for_model fed with explicit token/shape/pronunciation ids must
    produce the same output as a plain encode_plus call on the raw string.

    NOTE(review): ``tokenizers`` and every ``__magic_name__`` argument (the
    sentence, the three id lists and the two encode results being compared)
    are undefined — mangled locals; NameError as written.
    '''
    snake_case_ : Dict = self.get_tokenizers(do_lower_case=__magic_name__ )
    for tokenizer in tokenizers:
        with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
            snake_case_ : Any = '''你好,你是谁'''
            snake_case_ : Tuple = tokenizer.tokenize(__magic_name__ )
            snake_case_ : Optional[int] = tokenizer.convert_tokens_to_ids(__magic_name__ )
            snake_case_ : Dict = tokenizer.convert_tokens_to_shape_ids(__magic_name__ )
            snake_case_ : Union[str, Any] = tokenizer.convert_tokens_to_pronunciation_ids(__magic_name__ )
            snake_case_ : Dict = tokenizer.prepare_for_model(
                __magic_name__ , __magic_name__ , __magic_name__ , add_special_tokens=__magic_name__ )
            snake_case_ : int = tokenizer.encode_plus(__magic_name__ , add_special_tokens=__magic_name__ )
            self.assertEqual(__magic_name__ , __magic_name__ )
| 279 |
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is as it's normally
# pytorch-version dependent. That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
lowerCAmelCase_ = float('''nan''')
class __lowerCAmelCase :
def __init__(self , __magic_name__ ) -> int:
'''simple docstring'''
snake_case_ : List[Any] = sys.stdout
snake_case_ : int = open(__magic_name__ , '''a''' )
def __getattr__(self , __magic_name__ ) -> Dict:
'''simple docstring'''
return getattr(self.stdout , __magic_name__ )
def lowerCamelCase (self , __magic_name__ ) -> int:
'''simple docstring'''
self.stdout.write(__magic_name__ )
# strip tqdm codes
self.file.write(re.sub(R'''^.*\r''' , '''''' , __magic_name__ , 0 , re.M ) )
def lowerCamelCase_ ( max_width=80 , full_python_path=False ) -> str:
    """Reconstruct the command line this script was launched with.

    Prepends relevant env vars (CUDA_VISIBLE_DEVICES), the python executable
    (basename only unless ``full_python_path``) and the shell-quoted argv, then
    wraps the result with ``\\``-continuations so no line exceeds ``max_width``.

    Fix vs. the obfuscated original: the signature declared the same mangled
    name twice (``_UpperCamelCase`` — a SyntaxError) while the body referenced
    ``max_width`` / ``full_python_path``; the parameters are restored to the
    names the body actually uses, and every discarded local assignment is
    rebound to the name read later.
    """
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def lowerCamelCase_ ( args , output_dir ) -> list:
    """Build the benchmark's base command as an argv list.

    Normalizes ``args.base_cmd`` in place: collapses ``\\``/newline
    continuations, replaces any existing ``--output_dir`` with ``output_dir``,
    and forces ``--overwrite_output_dir``; returns
    ``[sys.executable] + shlex.split(args.base_cmd)``.

    Fixes vs. the obfuscated original: the two parameters shared one mangled
    name (SyntaxError), the first normalization result was discarded, and the
    undefined ``List`` return annotation raised NameError at import.
    """
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def lowerCamelCase_ ( id , cmd , variation , output_dir , target_metric_key , metric_keys , verbose ):
    """Execute one benchmark sub-process and harvest its metrics.

    Runs ``cmd``, saves its stdout/stderr under ``output_dir`` in files named
    after the ``variation``, and on success reads
    ``output_dir/all_results.json`` and returns only the entries listed in
    ``metric_keys``. On a non-zero exit the target metric is reported as NaN.
    ``id`` is accepted for signature compatibility with the caller but unused.

    Fixes vs. the obfuscated original: seven parameters shared one mangled
    name (SyntaxError); restored to the names the body references. NaN is
    produced locally (``float("nan")``) instead of relying on a module-level
    constant.
    """
    if 0:  # disabled debug stub: fabricate metrics without running anything
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([float("nan"), 10.31, 100.2, 55.6666, 222.22222222])},
        )
    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)
    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: float("nan")}
    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) -> Tuple:
    """Run one variation repeat-times times via the single-run helper, print a
    "✓"/"✘" progress line, and return the per-metric means (or NaN for the
    target metric if every run failed).

    NOTE(review): all ten parameters are mangled to one name
    (``_UpperCamelCase`` — duplicate arguments, a SyntaxError); the upstream
    signature is presumably (id, cmd, variation_key, variation,
    longest_variation_len, target_metric_key, report_metric_keys,
    repeat_times, output_dir, verbose). Locals (``metrics``, ``results``,
    ``outcome``, ``mean_metrics``, ``results_str``...) are likewise discarded
    into ``snake_case_`` while being read under their real names below.
    """
    snake_case_ : Tuple = []
    snake_case_ : Any = []
    snake_case_ : int = f'''{id}: {variation:<{longest_variation_len}}'''
    snake_case_ : Optional[Any] = f'''{preamble}: '''
    snake_case_ : Optional[int] = set(report_metric_keys + [target_metric_key] )
    for i in tqdm(range(_UpperCamelCase ) , desc=_UpperCamelCase , leave=_UpperCamelCase ):
        snake_case_ : int = process_run_single(
            _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        snake_case_ : List[str] = single_run_metrics[target_metric_key]
        # NaN target metric means the run failed; only successful runs are averaged
        if not math.isnan(_UpperCamelCase ):
            metrics.append(_UpperCamelCase )
            results.append(_UpperCamelCase )
            outcome += "✓"
        else:
            outcome += "✘"
    snake_case_ : Any = f'''\33[2K\r{outcome}'''
    if len(_UpperCamelCase ) > 0:
        snake_case_ : List[Any] = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()}
        snake_case_ : Any = round(mean_metrics[target_metric_key] , 2 )
        snake_case_ : List[str] = f'''{outcome} {mean_target}'''
        if len(_UpperCamelCase ) > 1:
            results_str += f''' {tuple(round(_UpperCamelCase , 2 ) for x in results )}'''
        print(_UpperCamelCase )
        snake_case_ : Optional[int] = variation
        return mean_metrics
    else:
        print(_UpperCamelCase )
        return {variation_key: variation, target_metric_key: nan}
def lowerCamelCase_ ( ):
    """Return a multi-line report of the software/hardware setup: timestamp,
    transformers/torch/cuda/python versions, GPU count and the first device's
    name and memory.

    Requires a CUDA-capable torch build — ``torch.cuda`` is queried directly
    and will raise without one.

    Fix vs. the obfuscated original: the device-properties lookup was
    discarded into a throwaway local while the f-string referenced the
    undefined name ``properties``; the result is now bound to the name used.
    """
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Dict:
    """Assemble the results DataFrame, add a diff-% column relative to the
    chosen (or minimal) baseline variation, and print a GitHub-markdown and a
    console-friendly report.

    NOTE(review): the five parameters share one mangled name (duplicate
    arguments — SyntaxError); upstream they are presumably (results,
    target_metric_key, report_metric_keys, base_variation, output_dir).
    Locals (``df``, ``sentinel_value``, ``report``, ``df_github``,
    ``df_console``...) are likewise discarded into ``snake_case_`` while being
    read under their real names, and ``get_versions`` /
    ``get_original_command`` do not exist under those names in this file.
    """
    snake_case_ : str = pd.DataFrame(_UpperCamelCase )
    snake_case_ : Optional[int] = '''variation'''
    snake_case_ : Union[str, Any] = '''diff_%'''
    snake_case_ : Optional[int] = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation] ):
        # this may still return nan
        snake_case_ : Optional[Any] = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(_UpperCamelCase ):
        # as a fallback, use the minimal value as the sentinel
        snake_case_ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(_UpperCamelCase ):
        snake_case_ : Dict = df.apply(
            lambda _UpperCamelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value )
            if not math.isnan(r[target_metric_key] )
            else 0 , axis='''columns''' , )
    # re-order columns
    snake_case_ : Dict = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    snake_case_ : int = df.reindex(_UpperCamelCase , axis='''columns''' ) # reorder cols
    # capitalize
    snake_case_ : Optional[int] = df.rename(str.capitalize , axis='''columns''' )
    # make the cols as narrow as possible
    snake_case_ : Any = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''<br>''' ) , axis='''columns''' )
    snake_case_ : int = df.rename(lambda _UpperCamelCase : c.replace('''_''' , '''\n''' ) , axis='''columns''' )
    snake_case_ : Tuple = ['''''', '''Copy between the cut-here-lines and paste as is to github or a forum''']
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=_UpperCamelCase , floatfmt='''.2f''' )]
    print('''\n\n'''.join(_UpperCamelCase ) )
def lowerCamelCase_ ( ) -> Any:
    """Benchmark driver: parse the CLI, build the cartesian product of the
    requested ``--variations`` dimensions, run every combination through the
    run helper, and hand the collected metrics to the report printer.

    NOTE(review): locals are discarded into ``snake_case_`` while being read
    under their real names (``parser``, ``output_dir``, ``base_cmd``,
    ``dims``, ``variations``, ``report_fn``, ``variation_key``...), and most
    positional arguments are the undefined ``_UpperCamelCase``; the function
    cannot run as written.
    """
    snake_case_ : Any = argparse.ArgumentParser()
    parser.add_argument(
        '''--base-cmd''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Base cmd''' , )
    parser.add_argument(
        '''--variations''' , default=_UpperCamelCase , type=_UpperCamelCase , nargs='''+''' , required=_UpperCamelCase , help='''Multi-dimensional variations, example: \'|--fp16|--bf16\' \'|--tf32\'''' , )
    parser.add_argument(
        '''--base-variation''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Baseline variation to compare to. if None the minimal target value will be used to compare against''' , )
    parser.add_argument(
        '''--target-metric-key''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Target metric key in output_dir/all_results.json, e.g., train_samples_per_second''' , )
    parser.add_argument(
        '''--report-metric-keys''' , default='''''' , type=_UpperCamelCase , help='''Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., \'train_loss train_samples''' , )
    parser.add_argument(
        '''--repeat-times''' , default=1 , type=_UpperCamelCase , help='''How many times to re-run each variation - an average will be reported''' , )
    parser.add_argument(
        '''--output_dir''' , default='''output_benchmark''' , type=_UpperCamelCase , help='''The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked''' , )
    parser.add_argument(
        '''--verbose''' , default=_UpperCamelCase , action='''store_true''' , help='''Whether to show the outputs of each run or just the benchmark progress''' , )
    snake_case_ : Tuple = parser.parse_args()
    snake_case_ : Optional[Any] = args.output_dir
    Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
    snake_case_ : Optional[int] = get_base_command(_UpperCamelCase , _UpperCamelCase )
    # split each dimension into its --foo variations
    snake_case_ : Optional[int] = [list(map(str.strip , re.split(R'''\|''' , _UpperCamelCase ) ) ) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    snake_case_ : List[str] = list(map(str.strip , map(''' '''.join , itertools.product(*_UpperCamelCase ) ) ) )
    snake_case_ : Optional[int] = max(len(_UpperCamelCase ) for x in variations )
    # split wanted keys
    snake_case_ : int = args.report_metric_keys.split()
    # capture prints into a log file for convenience
    snake_case_ : str = f'''benchmark-report-{datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S" )}.txt'''
    print(f'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' )
    print(f'''and this script\'s output is also piped into {report_fn}''' )
    snake_case_ : Tuple = Tee(_UpperCamelCase )
    print(f'''\n*** Running {len(_UpperCamelCase )} benchmarks:''' )
    print(f'''Base command: {" ".join(_UpperCamelCase )}''' )
    snake_case_ : List[Any] = '''variation'''
    snake_case_ : Tuple = []
    for id, variation in enumerate(tqdm(_UpperCamelCase , desc='''Total completion: ''' , leave=_UpperCamelCase ) ):
        snake_case_ : Optional[Any] = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.repeat_times , _UpperCamelCase , args.verbose , ) )
    process_results(_UpperCamelCase , args.target_metric_key , _UpperCamelCase , args.base_variation , _UpperCamelCase )
if __name__ == "__main__":
    # Fix: the guard called an undefined name ``main()``. After obfuscation the
    # driver is the last function bound to ``lowerCamelCase_`` (the argparse
    # entry point above), so invoke that binding instead.
    lowerCamelCase_()
| 279 | 1 |
def __lowerCamelCase ( __magic_name__ : list , __magic_name__ : list , __magic_name__ : int ):
a__: int =len(__magic_name__ )
a__: Optional[int] =[[0] * n for i in range(__magic_name__ )]
for i in range(__magic_name__ ):
a__: Dict =y_points[i]
for i in range(2 , __magic_name__ ):
for j in range(__magic_name__ , __magic_name__ ):
a__: Any =(
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
    # run the module's doctests when executed directly
    import doctest

    doctest.testmod()
| 42 |
def __lowerCamelCase ( __magic_name__ : int ):
if not isinstance(__magic_name__ , __magic_name__ ):
a__: List[str] =F"Input value of [number={number}] must be an integer"
raise TypeError(__magic_name__ )
if number < 1:
a__: Union[str, Any] =F"Input value of [number={number}] must be > 0"
raise ValueError(__magic_name__ )
a__: List[Any] =1
for i in range(1 , __magic_name__ ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
    # run the module's doctests when executed directly
    import doctest

    doctest.testmod()
| 42 | 1 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowercase ( unittest.TestCase ):
    """Fixture builder for the Flax RoBERTa-PreLayerNorm tests: holds the model
    hyper-parameters and produces a config plus random input tensors.

    NOTE(review): every ``__init__`` parameter after ``self`` is mangled to the
    same name ``lowerCAmelCase__`` (duplicate arguments — a SyntaxError), and
    the three methods below all share the mangled name ``_SCREAMING_SNAKE_CASE``,
    so later definitions shadow earlier ones; only the last method survives on
    the class. Upstream these are prepare_config_and_inputs,
    prepare_config_and_inputs_for_common and
    prepare_config_and_inputs_for_decoder — confirm before use.
    """

    def __init__( self : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any]=13 , lowerCAmelCase__ : List[str]=7 , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : str=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Optional[int]=99 , lowerCAmelCase__ : int=32 , lowerCAmelCase__ : str=5 , lowerCAmelCase__ : List[str]=4 , lowerCAmelCase__ : Union[str, Any]=37 , lowerCAmelCase__ : Union[str, Any]="gelu" , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Optional[int]=0.1 , lowerCAmelCase__ : Optional[int]=512 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : Optional[Any]=2 , lowerCAmelCase__ : Optional[Any]=0.02 , lowerCAmelCase__ : Dict=4 , ):
        # store every hyper-parameter on the instance for the tests to read
        SCREAMING_SNAKE_CASE_: Any = parent
        SCREAMING_SNAKE_CASE_: str = batch_size
        SCREAMING_SNAKE_CASE_: Any = seq_length
        SCREAMING_SNAKE_CASE_: int = is_training
        SCREAMING_SNAKE_CASE_: Optional[int] = use_attention_mask
        SCREAMING_SNAKE_CASE_: List[Any] = use_token_type_ids
        SCREAMING_SNAKE_CASE_: Union[str, Any] = use_labels
        SCREAMING_SNAKE_CASE_: Optional[Any] = vocab_size
        SCREAMING_SNAKE_CASE_: str = hidden_size
        SCREAMING_SNAKE_CASE_: Tuple = num_hidden_layers
        SCREAMING_SNAKE_CASE_: List[Any] = num_attention_heads
        SCREAMING_SNAKE_CASE_: int = intermediate_size
        SCREAMING_SNAKE_CASE_: Any = hidden_act
        SCREAMING_SNAKE_CASE_: Optional[int] = hidden_dropout_prob
        SCREAMING_SNAKE_CASE_: List[Any] = attention_probs_dropout_prob
        SCREAMING_SNAKE_CASE_: int = max_position_embeddings
        SCREAMING_SNAKE_CASE_: List[Any] = type_vocab_size
        SCREAMING_SNAKE_CASE_: Dict = type_sequence_label_size
        SCREAMING_SNAKE_CASE_: Any = initializer_range
        SCREAMING_SNAKE_CASE_: Optional[int] = num_choices

    def _SCREAMING_SNAKE_CASE ( self : Tuple):
        # Build config + random input_ids / attention_mask / token_type_ids
        SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        SCREAMING_SNAKE_CASE_: Tuple = None
        if self.use_attention_mask:
            SCREAMING_SNAKE_CASE_: Any = random_attention_mask([self.batch_size, self.seq_length])
        SCREAMING_SNAKE_CASE_: List[Any] = None
        if self.use_token_type_ids:
            SCREAMING_SNAKE_CASE_: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        SCREAMING_SNAKE_CASE_: Dict = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _SCREAMING_SNAKE_CASE ( self : str):
        # Repackage the prepared inputs as the dict the common test mixin expects
        SCREAMING_SNAKE_CASE_: List[str] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Union[str, Any] = config_and_inputs
        SCREAMING_SNAKE_CASE_: List[str] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def _SCREAMING_SNAKE_CASE ( self : List[str]):
        # Decoder variant: additionally fabricate encoder hidden states and mask
        SCREAMING_SNAKE_CASE_: Optional[int] = self.prepare_config_and_inputs()
        SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: List[str] = config_and_inputs
        SCREAMING_SNAKE_CASE_: Union[str, Any] = True
        SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowercase ( UpperCAmelCase_ , unittest.TestCase ):
    """Common Flax model tests for RoBERTa-PreLayerNorm: instantiate the tester
    and smoke-test from_pretrained for every model head.

    NOTE(review): the base class ``UpperCAmelCase_`` is undefined (mangled —
    upstream it is FlaxModelTesterMixin), both class attributes are mangled to
    the same name ``_UpperCAmelCase`` (the tuple overwrites the bool — upstream
    these are two differently named attributes), and
    ``FlaxRobertaPreLayerNormModelTester`` does not exist under that name in
    this file (the tester class above is named ``__lowercase``).
    """
    _UpperCAmelCase : int = True
    _UpperCAmelCase : Optional[Any] = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
        # setUp equivalent: build the shared model tester
        SCREAMING_SNAKE_CASE_: List[Any] = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def _SCREAMING_SNAKE_CASE ( self : int):
        # every head must load from the hub checkpoint and run a trivial forward
        for model_class_name in self.all_model_classes:
            SCREAMING_SNAKE_CASE_: str = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCAmelCase__)
            SCREAMING_SNAKE_CASE_: List[Any] = model(np.ones((1, 1)))
            self.assertIsNotNone(lowerCAmelCase__)
@require_flax
class __lowercase ( unittest.TestCase ):
    """Slow integration tests: run the pretrained checkpoint on a fixed id
    sequence and compare output slices against hard-coded reference values.

    NOTE(review): this class reuses the mangled name ``__lowercase`` (shadowing
    the classes above at module level), and the method-body references
    (``model``, ``input_ids``, ``output``, ``expected_shape``,
    ``expected_slice`` and the ``lowerCAmelCase__`` arguments) are mangled
    locals that are undefined as written.
    """
    @slow
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        # masked-LM head: check logits shape and a 3x3 slice of the output
        SCREAMING_SNAKE_CASE_: Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: List[Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa)
        SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)[0]
        SCREAMING_SNAKE_CASE_: List[Any] = [1, 11, 5_0265]
        self.assertEqual(list(output.shape) , lowerCAmelCase__)
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE_: Any = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa)
        self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4))

    @slow
    def _SCREAMING_SNAKE_CASE ( self : Dict):
        # base model: check a 3x3 slice of the last hidden state
        SCREAMING_SNAKE_CASE_: Optional[int] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa)
        SCREAMING_SNAKE_CASE_: Dict = model(lowerCAmelCase__)[0]
        # compare the actual values for a slice.
        SCREAMING_SNAKE_CASE_: List[str] = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa)
        self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase__ , atol=1E-4))
| 13 |
'''simple docstring'''
import math
import unittest
def is_prime(number: int) -> bool:
    """Return ``True`` when *number* is prime.

    Raises:
        AssertionError: if *number* is not a non-negative ``int``.
    """
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes > 3 have the form 6k +/- 1; math.isqrt avoids any float
    # rounding error for very large integers.
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


# Backward-compatible alias for the historical (obfuscated) name.
snake_case = is_prime
class UpperCamelCase__(unittest.TestCase):
    """Unit tests for the primality helper defined above (bound as ``snake_case``)."""

    def test_primes(self) -> None:
        """Known small primes are classified as prime."""
        self.assertTrue(snake_case(2))
        self.assertTrue(snake_case(3))
        self.assertTrue(snake_case(5))
        self.assertTrue(snake_case(7))
        self.assertTrue(snake_case(11))
        self.assertTrue(snake_case(13))
        self.assertTrue(snake_case(17))
        self.assertTrue(snake_case(19))
        self.assertTrue(snake_case(23))
        self.assertTrue(snake_case(29))

    def test_not_primes(self) -> None:
        """Non-primes are rejected; negatives violate the input contract."""
        # The helper asserts a non-negative int, so a negative input raises.
        with self.assertRaises(AssertionError):
            snake_case(-19)
        self.assertFalse(
            snake_case(0),
            "Zero doesn't have any positive factors, primes must have exactly two.",
        )
        self.assertFalse(
            snake_case(1),
            "One only has 1 positive factor, primes must have exactly two.",
        )
        self.assertFalse(snake_case(2 * 2))
        self.assertFalse(snake_case(2 * 3))
        self.assertFalse(snake_case(3 * 3))
        self.assertFalse(snake_case(3 * 5))
        self.assertFalse(snake_case(3 * 5 * 7))
if __name__ == "__main__":
    # Run the unit tests above when this module is executed as a script.
    unittest.main()
| 161 | 0 |
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    """Return the median of the merged contents of two (unsorted) arrays.

    >>> median_of_two_arrays([1, 3], [2])
    2
    >>> median_of_two_arrays([1, 2], [3, 4])
    2.5
    """
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        # Odd count: the single middle element is the median.
        return all_numbers[div]
    # Even count: mean of the two middle elements.
    return (all_numbers[div] + all_numbers[div - 1]) / 2


# Backward-compatible alias for the historical (obfuscated) name.
UpperCamelCase = median_of_two_arrays


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 252 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module-level values were previously bound to the same
# obfuscated name, so the logger was immediately clobbered by the archive map.
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCamelCase(PretrainedConfig):
    """Configuration for a Decision Transformer model.

    Stores the RL-specific sizes (state/action dims, episode length) plus the
    GPT-2-style transformer hyperparameters consumed by the model.
    """

    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        # RL interface sizes.
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        # Transformer backbone hyperparameters (GPT-2 style names).
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 252 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase__(unittest.TestCase):
    """Tests for ``VisionTextDualEncoderProcessor`` built from a BERT tokenizer
    and a ViT image processor, both materialized in a temporary directory."""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list with a single random PIL image (moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with self.assertRaises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = VisionTextDualEncoderProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 30 |
def solution(n: int = 100) -> int:
    """Project Euler 6: difference between the square of the sum and the sum
    of the squares of the first *n* natural numbers, via closed forms.

    >>> solution(10)
    2640
    """
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


# Backward-compatible alias for the historical (obfuscated) name.
a = solution


if __name__ == "__main__":
    print(f"{solution() = }")
| 30 | 1 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Feature scripts skipped by the diff-based example tests below (the test
# class reads this list by name).
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
class _lowerCamelCase(unittest.TestCase):
    """Checks that each ``examples/by_feature/*.py`` script stays in sync with
    the corresponding complete example script."""

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        """Diff every feature script against *complete_file_name*; the
        remaining diff (after removing *special_strings*) must be empty."""
        self.maxDiff = None  # show full diffs on failure
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, secondary_filename, parser_only
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Tracking-specific lines that legitimately differ between the scripts.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class _lowerCamelCaseFeatures(TempDirTestCase):
    """Launches the feature example scripts end-to-end with `accelerate launch`.

    NOTE(review): renamed — the class previously reused the name of the
    TestCase above, which would have shadowed it and skipped its tests.
    """

    # TempDirTestCase flag: keep the temp dir between tests so checkpoints
    # written by one test are visible to the resume tests that follow.
    clear_on_setup = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
'''simple docstring'''
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
# NOTE(review): both values were previously bound to one obfuscated name,
# clobbering the logger. `lowerCAmelCase` is kept as an alias for the example
# string so existing references keep working.
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
'''

# Backward-compatible alias (the obfuscated module exposed the example string
# under this name).
lowerCAmelCase = EXAMPLE_DOC_STRING
def get_new_h_w(h, w, scale_factor=8):
    """Round ``(h, w)`` up to the next multiple of ``scale_factor**2`` and
    return the corresponding latent height/width (divided by ``scale_factor``).

    The pipeline below calls this by name to size its latents.
    """
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


# Backward-compatible alias for the historical (obfuscated) name.
lowerCamelCase = get_new_h_w
class _lowerCamelCase(DiffusionPipeline):
    """Text-to-image pipeline for Kandinsky 2.1: a multilingual CLIP text
    encoder conditions a UNet denoiser whose latents are decoded by a MoVQ
    autoencoder."""

    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNetaDConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        # Spatial down-scaling factor of the MoVQ autoencoder.
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        """Draw (or validate user-supplied) initial latents and scale them by
        the scheduler's initial noise sigma."""
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        """Tokenize and encode *prompt* (and *negative_prompt* when doing
        classifier-free guidance), duplicated per generated image."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            # NOTE(review): the module-level logger binding was clobbered by the
            # example docstring above, so fetch a logger locally.
            logging.get_logger(__name__).warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)

        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)

            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)

            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask

    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all sub-models to CPU, moving each to GPU only while it runs."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole sub-models with hooks (coarser, faster than sequential)."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)

        # NOTE(review): `safety_checker` is never registered by __init__; this
        # branch looks copied from Stable Diffusion — confirm it is reachable.
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)

        # We'll offload the last model manually.
        self.final_offload_hook = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """Device the unet actually executes on (accounts for accelerate hooks)."""
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(lowerCAmelCase)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images for *prompt* conditioned on prior image embeddings."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device

        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point for the ``transformers-cli`` console script: build the
    argument parser, register all subcommands, dispatch, and run."""
    parser = ArgumentParser("""Transformers CLI tool""", usage="""transformers-cli <command> [<args>]""")
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()
    if not hasattr(args, """func"""):
        # No subcommand given: show usage and exit non-zero.
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 136 |
"""simple docstring"""
# 3-component float tuples used as lightweight vector/point types; the
# functions below annotate their return values with `Vectorad`.
Vectorad = tuple[float, float, float]
Pointad = tuple[float, float, float]
def create_vector(end_pointa, end_pointb) -> "Vectorad":
    """Return the 3D vector from *end_pointa* to *end_pointb* (b - a).

    The collinearity check below calls this by name.
    """
    x = end_pointb[0] - end_pointa[0]
    y = end_pointb[1] - end_pointa[1]
    z = end_pointb[2] - end_pointa[2]
    return (x, y, z)
def get_ad_vectors_cross(ab, ac) -> "Vectorad":
    """Return the cross product ``ab x ac`` of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def _SCREAMING_SNAKE_CASE(vector: tuple[float, float, float], accuracy: int) -> bool:
    """Return True when every component of ``vector`` rounds to zero.

    Fixes two defects in the mangled original: duplicate parameter names
    (a SyntaxError) and ``round`` being applied to the whole tuple instead
    of each component (a TypeError at runtime).
    """
    # Rounding absorbs floating-point noise before the exact comparison.
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


# Conventional public name for the mangled identifier above.
is_zero_vector = _SCREAMING_SNAKE_CASE
def _SCREAMING_SNAKE_CASE(
    pointa: tuple[float, float, float],
    pointb: tuple[float, float, float],
    pointc: tuple[float, float, float],
    accuracy: int = 10,
) -> bool:
    """Return True when the three 3-D points are collinear.

    Builds the vectors AB and AC and checks whether their cross product is
    (numerically) the zero vector, rounding each component to ``accuracy``
    decimal places. Self-contained: the mangled original called helper
    names that no longer exist in this file, and declared all four
    parameters with the same name (a SyntaxError).
    """
    # Vectors from the first point to the other two.
    ab = tuple(pointb[i] - pointa[i] for i in range(3))
    ac = tuple(pointc[i] - pointa[i] for i in range(3))
    # AB x AC vanishes exactly when the three points are collinear.
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        (ab[0] * ac[2] - ab[2] * ac[0]) * -1,
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    # Rounding absorbs floating-point noise before the zero comparison.
    return tuple(round(c, accuracy) for c in cross) == (0, 0, 0)


# Conventional public name for the mangled identifier above.
are_collinear = _SCREAMING_SNAKE_CASE
| 136 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger. The mangled original bound the logger AND the archive map
# to the same identifier, silently clobbering the logger.
logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}

# Backward-compatible binding: the mangled name's final value was the map.
UpperCamelCase__ = CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP
class __SCREAMING_SNAKE_CASE(PretrainedConfig):
    """Configuration class for the CTRL model.

    The defaults reproduce the published CTRL architecture (48 layers,
    1280-dim embeddings, 16 attention heads, 8192-dim feed-forward).
    Fixes in this rewrite: the base class was the undefined name ``_a``
    (``PretrainedConfig`` is imported above), the three class attributes
    were all bound to one mangled name, and ``__init__`` declared every
    parameter with the same identifier (a SyntaxError).
    """

    model_type = "ctrl"
    # Cache keys that should not be returned at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map the generic HF attribute names onto CTRL-specific ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        # Inner (feed-forward) dimension of each transformer block.
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
| 87 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class __SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Tests for TvltProcessor, which pairs a TvltImageProcessor with a
    TvltFeatureExtractor.

    Fixes in this rewrite: every method of the mangled original was named
    ``_lowerCamelCase`` (later defs shadowed earlier ones, so unittest could
    run none of them), ``setUp`` assigned locals instead of instance
    attributes, and several assertion targets were undefined mangled names.
    """

    def setUp(self):
        # Reference checkpoint plus a scratch dir for save/load round-trips.
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        # The processor must delegate audio inputs to the feature extractor.
        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        # The processor must delegate image inputs to the image processor.
        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
| 87 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase(unittest.TestCase):
    """Configuration holder that fabricates the expected image-processor
    kwargs (and sizing metadata) for the Levit image-processing tests.

    The mangled original declared every ``__init__`` parameter as ``_A``,
    which is a SyntaxError; the real parameter names are restored from the
    attribute assignments below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        # NOTE: the mutable list defaults are shared across instances; this is
        # acceptable here because they are never mutated.
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        """Return the kwargs used to construct the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }


# Conventional name for this tester; the test class below refers to it as
# ``LevitImageProcessingTester``, which the mangling had destroyed.
LevitImageProcessingTester = UpperCamelCase
@require_torch
@require_vision
class UpperCamelCase(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Integration tests for LevitImageProcessor over PIL, numpy and torch inputs.

    Fixes in this rewrite: the mixin base was the undefined mangled name
    ``lowercase`` (``ImageProcessingSavingTestMixin`` is imported above);
    the class attribute the mixin reads was mangled to ``UpperCAmelCase``
    instead of ``image_processing_class``; every method was named
    ``_lowercase`` so later defs shadowed earlier ones; and ``setUp``
    assigned a local instead of ``self.image_processor_tester``.

    NOTE(review): this class shadows the tester class above, which this
    file also emitted under the mangled name ``UpperCamelCase``; the tester
    must be reachable as ``LevitImageProcessingTester`` for setUp to work.
    """

    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        # Intentionally empty: batch-feature behaviour is exercised elsewhere.
        pass

    def _check_shapes(self, image_processing, image_inputs):
        """Shared shape assertions for unbatched and batched encodings."""
        tester = self.image_processor_tester

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                tester.num_channels,
                tester.crop_size["height"],
                tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                tester.batch_size,
                tester.num_channels,
                tester.crop_size["height"],
                tester.crop_size["width"],
            ),
        )

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        self._check_shapes(image_processing, image_inputs)

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        self._check_shapes(image_processing, image_inputs)

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        self._check_shapes(image_processing, image_inputs)
| 172 | """simple docstring"""
def __UpperCAmelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
'''simple docstring'''
while a != 0:
__snake_case , __snake_case : Optional[Any] = b % a, a
return b
def __UpperCAmelCase(a: int, m: int) -> int:
    """Return the modular multiplicative inverse of ``a`` modulo ``m``.

    Uses the extended Euclidean algorithm; on loop exit ``u3`` holds
    gcd(a, m), so no separate gcd call is needed (the mangled original
    called an undefined ``gcd`` and declared duplicate parameter names).
    Assumes positive inputs. Raises ValueError when no inverse exists.
    """
    # Invariants: u3 == u1*a + u2*m and v3 == v1*a + v2*m.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = v1, v2, v3, u1 - q * v1, u2 - q * v2, u3 - q * v3
    # u3 is now gcd(a, m); an inverse exists only when it equals 1.
    if u3 != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    return u1 % m


# Conventional public name for the mangled identifier above.
mod_inverse = __UpperCAmelCase
| 172 | 1 |
def _a(equation1: list[int], equation2: list[int]) -> tuple[float, float]:
    """Solve the 2x2 linear system a1*x + b1*y = c1, a2*x + b2*y = c2
    using Cramer's rule.

    Each equation is a coefficient triple ``[a, b, c]``. Raises ValueError
    for malformed input, for systems with no solution, and for systems with
    infinitely many solutions. (The mangled original declared duplicate
    parameter names — a SyntaxError — and referenced undefined coefficient
    names in the determinant formulas.)
    """
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        raise ValueError("No solution. (Inconsistent system)")
    if determinant_x == determinant_y == 0:
        # Trivial solution (Inconsistent system)
        return (0.0, 0.0)
    x = determinant_x / determinant
    y = determinant_y / determinant
    # Non-Trivial Solution (Consistent system)
    return (x, y)


# Conventional public name for the mangled identifier above.
cramers_rule_2x2 = _a
| 360 |
from __future__ import annotations
from math import pi, sqrt
def _a(inductance: float, capacitance: float) -> tuple:
    """Return the resonant frequency of an LC circuit: f = 1 / (2*pi*sqrt(L*C)).

    Returns the pair ("Resonant frequency", f). Raises ValueError for
    non-positive inductance or capacitance. (The mangled original declared
    both parameters with the same name, a SyntaxError.)
    """
    if inductance <= 0:
        raise ValueError("Inductance cannot be 0 or negative")
    if capacitance <= 0:
        raise ValueError("Capacitance cannot be 0 or negative")
    return (
        "Resonant frequency",
        float(1 / (2 * pi * (sqrt(inductance * capacitance)))),
    )


# Conventional public name for the mangled identifier above.
resonant_frequency = _a

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 250 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.