| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (86 to 54.5k chars) | int64 (0 to 371) | string (87 to 49.2k chars) | int64 (0 to 349) | int64 (0 to 1) |
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Tuple=13 , __lowercase : int=7 , __lowercase : Any=True , __lowercase : Any=True , __lowercase : List[Any]=False , __lowercase : Tuple=True , __lowercase : List[str]=99 , __lowercase : Optional[int]=32 , __lowercase : List[str]=5 , __lowercase : Optional[int]=4 , __lowercase : List[str]=37 , __lowercase : str="gelu" , __lowercase : Any=0.1 , __lowercase : int=0.1 , __lowercase : Any=512 , __lowercase : Optional[int]=16 , __lowercase : Optional[int]=2 , __lowercase : Dict=0.02 , __lowercase : Tuple=3 , __lowercase : Optional[Any]=4 , __lowercase : Dict=None , ):
'''simple docstring'''
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
return OpenLlamaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowercase , initializer_range=self.initializer_range , use_stable_embedding=__lowercase , )
def UpperCamelCase_ ( self : List[str] , __lowercase : Optional[int] , __lowercase : Dict , __lowercase : List[str] , __lowercase : Dict , __lowercase : int , __lowercase : Optional[Any] , __lowercase : Any ):
'''simple docstring'''
__a = OpenLlamaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase )
__a = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Union[str, Any] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Dict , __lowercase : Tuple , __lowercase : int , __lowercase : str , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Optional[int] , ):
'''simple docstring'''
__a = True
__a = OpenLlamaModel(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , )
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , )
__a = model(__lowercase , attention_mask=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Optional[int] , __lowercase : List[Any] , __lowercase : Tuple , __lowercase : Tuple , __lowercase : Tuple , __lowercase : List[str] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[int] , __lowercase : Optional[Any] , ):
'''simple docstring'''
__a = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : str , __lowercase : Union[str, Any] , __lowercase : Union[str, Any] , __lowercase : Optional[Any] , __lowercase : Optional[int] , __lowercase : Optional[Any] , __lowercase : List[str] , __lowercase : int , __lowercase : Dict , __lowercase : Dict , ):
'''simple docstring'''
__a = True
__a = True
__a = OpenLlamaForCausalLM(config=__lowercase )
model.to(__lowercase )
model.eval()
# first forward pass
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , use_cache=__lowercase , )
__a = outputs.past_key_values
        # create hypothetical next tokens and extend next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([input_mask, next_mask] , dim=-1 )
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0]
__a = model(
__lowercase , attention_mask=__lowercase , encoder_hidden_states=__lowercase , encoder_attention_mask=__lowercase , past_key_values=__lowercase , output_hidden_states=__lowercase , )["""hidden_states"""][0]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-3 ) )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
__lowerCamelCase : Optional[int] =(
(OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
)
__lowerCamelCase : List[str] =(OpenLlamaForCausalLM,) if is_torch_available() else ()
__lowerCamelCase : Tuple =(
{
'feature-extraction': OpenLlamaModel,
'text-classification': OpenLlamaForSequenceClassification,
'text-generation': OpenLlamaForCausalLM,
'zero-shot': OpenLlamaForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCamelCase : Tuple =False
__lowerCamelCase : Any =False
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a = OpenLlamaModelTester(self )
__a = ConfigTester(self , config_class=__lowercase , hidden_size=37 )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*__lowercase )
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """single_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = """multi_label_classification"""
__a = input_dict["""input_ids"""]
__a = input_ids.ne(1 ).to(__lowercase )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = OpenLlamaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
__a = model(__lowercase , attention_mask=__lowercase , labels=__lowercase )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@unittest.skip("""Open-Llama buffers include complex numbers, which breaks this test""" )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCamelCase_ ( self : List[Any] , __lowercase : Dict ):
'''simple docstring'''
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = ids_tensor([1, 10] , config.vocab_size )
__a = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = OpenLlamaModel(__lowercase )
original_model.to(__lowercase )
original_model.eval()
__a = original_model(__lowercase ).last_hidden_state
__a = original_model(__lowercase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__a = {"""type""": scaling_type, """factor""": 10.0}
__a = OpenLlamaModel(__lowercase )
scaled_model.to(__lowercase )
scaled_model.eval()
__a = scaled_model(__lowercase ).last_hidden_state
__a = scaled_model(__lowercase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(__lowercase , __lowercase , atol=1E-5 ) )
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE ( lowerCamelCase__ ):
__lowerCamelCase : Optional[int] =(IPNDMScheduler,)
__lowerCamelCase : int =(('num_inference_steps', 50),)
def UpperCamelCase_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
__a = {"""num_train_timesteps""": 1000}
config.update(**__lowercase )
return config
def UpperCamelCase_ ( self : Any , __lowercase : Tuple=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
pass
def UpperCamelCase_ ( self : str , __lowercase : int=0 , **__lowercase : Dict ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
scheduler.set_timesteps(__lowercase )
# copy over dummy past residuals (must be after setting timesteps)
__a = dummy_past_residuals[:]
if time_step is None:
__a = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowercase )
__a = scheduler_class.from_pretrained(__lowercase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowercase )
# copy over dummy past residual (must be after setting timesteps)
__a = dummy_past_residuals[:]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = new_scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCamelCase_ ( self : List[str] , **__lowercase : Dict ):
'''simple docstring'''
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(**__lowercase )
__a = scheduler_class(**__lowercase )
__a = 10
__a = self.dummy_model()
__a = self.dummy_sample_deter
scheduler.set_timesteps(__lowercase )
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__a = model(__lowercase , __lowercase )
__a = scheduler.step(__lowercase , __lowercase , __lowercase ).prev_sample
return sample
def UpperCamelCase_ ( self : str ):
'''simple docstring'''
__a = dict(self.forward_default_kwargs )
__a = kwargs.pop("""num_inference_steps""" , __lowercase )
for scheduler_class in self.scheduler_classes:
__a = self.get_scheduler_config()
__a = scheduler_class(**__lowercase )
__a = self.dummy_sample
__a = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowercase , """set_timesteps""" ):
scheduler.set_timesteps(__lowercase )
elif num_inference_steps is not None and not hasattr(__lowercase , """set_timesteps""" ):
__a = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__a = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
__a = dummy_past_residuals[:]
__a = scheduler.timesteps[5]
__a = scheduler.timesteps[6]
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
__a = scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowercase , time_step=__lowercase )
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
__a = self.full_loop()
__a = torch.mean(torch.abs(__lowercase ) )
assert abs(result_mean.item() - 2540529 ) < 10
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase_ : Optional[int] = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ['''CLIPFeatureExtractor''']
lowerCAmelCase_ : Optional[int] = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[Any] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Dict = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowerCAmelCase ( __a ):
def snake_case_ (self ):
_UpperCAmelCase : int = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """tf_padding""" ) )
self.parent.assertTrue(hasattr(lowerCAmelCase__ , """depth_multiplier""" ) )
class __lowerCAmelCase :
def __init__(self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=3 , lowerCAmelCase__=3_2 , lowerCAmelCase__=0.2_5 , lowerCAmelCase__=8 , lowerCAmelCase__=True , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=3_2 , lowerCAmelCase__="relu6" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.0_2 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=1_0 , lowerCAmelCase__=None , ):
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : int = image_size
_UpperCAmelCase : List[str] = depth_multiplier
_UpperCAmelCase : Any = min_depth
_UpperCAmelCase : Dict = tf_padding
_UpperCAmelCase : Dict = int(last_hidden_size * depth_multiplier )
_UpperCAmelCase : Dict = output_stride
_UpperCAmelCase : Optional[int] = hidden_act
_UpperCAmelCase : Optional[Any] = classifier_dropout_prob
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : int = num_labels
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Any = scope
def snake_case_ (self ):
_UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def snake_case_ (self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = MobileNetVaModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def snake_case_ (self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : Tuple = MobileNetVaForImageClassification(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ (self ):
_UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = config_and_inputs
_UpperCAmelCase : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __a , __a , unittest.TestCase ):
snake_case : Tuple = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
snake_case : Optional[Any] = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
snake_case : str = False
snake_case : str = False
snake_case : Optional[Any] = False
snake_case : Optional[int] = False
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MobileNetVaModelTester(self )
_UpperCAmelCase : str = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ )
def snake_case_ (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileNetV1 does not use inputs_embeds""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not support input and output embeddings""" )
def snake_case_ (self ):
pass
@unittest.skip(reason="""MobileNetV1 does not output attentions""" )
def snake_case_ (self ):
pass
def snake_case_ (self ):
_UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(lowerCAmelCase__ )
_UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def snake_case_ (self ):
def check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 2_6
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case_ (self ):
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ )
@slow
def snake_case_ (self ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[str] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def __A ( ):
_UpperCAmelCase : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
@cached_property
def snake_case_ (self ):
return (
MobileNetVaImageProcessor.from_pretrained("""google/mobilenet_v1_1.0_224""" ) if is_vision_available() else None
)
@slow
def snake_case_ (self ):
_UpperCAmelCase : Optional[int] = MobileNetVaForImageClassification.from_pretrained("""google/mobilenet_v1_1.0_224""" ).to(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = self.default_image_processor
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=lowerCAmelCase__ , return_tensors="""pt""" ).to(lowerCAmelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCAmelCase__ )
# verify the logits
_UpperCAmelCase : Optional[int] = torch.Size((1, 1_0_0_1) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
_UpperCAmelCase : str = torch.tensor([-4.1_7_3_9, -1.1_2_3_3, 3.1_2_0_5] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1e-4 ) )
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
_a : int= WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
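# Illustrative sketch (not from the original file): `handle_test_results` expects a pytest-style
# summary string such as "1 failed, 2 passed in 6.72s", for which it returns (1, 2, "6.72s").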
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class UpperCamelCase :
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
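    # Illustrative example (not from the original file): a `_time_spent` entry of "6.72" has no
    # ":" separators, so it is treated as 6.72 seconds and rendered as "0h0m6s".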
@property
def _lowercase (self : List[str]) -> Dict:
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _lowercase (self : Union[str, Any]) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _lowercase (self : str) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
f" {self.time}."
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
@property
def _lowercase (self : Any) -> Dict:
__snake_case : str = 40
__snake_case : Optional[Any] = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(_A , _A)}
__snake_case : List[str] = ''
for category, failures in category_failures.items():
if len(_A) == 0:
continue
if report != "":
report += "\n\n"
report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
report += "`"
report += "`\n`".join(_A)
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"The following examples had failures:\n\n\n{report}\n",
},
}
@property
def _lowercase (self : List[str]) -> str:
__snake_case : Tuple = [self.header]
if self.n_failures > 0:
blocks.append(self.failures)
if self.n_failures > 0:
blocks.extend([self.category_failures])
if self.n_failures == 0:
blocks.append(self.no_failures)
return json.dumps(_A)
@staticmethod
def _lowercase () -> Any:
__snake_case : Optional[int] = [
{
'type': 'section',
'text': {
'type': 'plain_text',
'text': 'There was an issue running the tests.',
},
'accessory': {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
]
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(_A)}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=_A , )
def _lowercase (self : List[Any]) -> Optional[Any]:
print('Sending the following payload')
print(json.dumps({'blocks': json.loads(self.payload)}))
__snake_case : str = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.'
__snake_case : Any = client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=_A , )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            # Keep each error message short enough for a Slack block.
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _lowercase (self : Dict) -> Dict:
if self.thread_ts is None:
raise ValueError('Can only post reply if a post has been made.')
__snake_case : Optional[int] = self.doc_test_results.pop('job_link')
self.doc_test_results.pop('failures')
self.doc_test_results.pop('success')
self.doc_test_results.pop('time_spent')
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
for job, job_result in sorted_dict:
if len(job_result['failures']):
__snake_case : Any = f"*Num failures* :{len(job_result['failed'])} \n"
__snake_case : Tuple = job_result['failures']
__snake_case : str = self.get_reply_blocks(_A , _A , _A , text=_A)
print('Sending the following reply')
print(json.dumps({'blocks': blocks}))
client.chat_postMessage(
channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f"Results for {job}" , blocks=_A , thread_ts=self.thread_ts['ts'] , )
time.sleep(1)
def __UpperCAmelCase ( ) -> Any:
'''simple docstring'''
__snake_case : Any = os.environ['GITHUB_RUN_ID']
__snake_case : Optional[int] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
__snake_case : Optional[int] = requests.get(UpperCAmelCase_ ).json()
__snake_case : int = {}
try:
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
__snake_case : Optional[int] = math.ceil((result['total_count'] - 1_00) / 1_00 )
for i in range(UpperCAmelCase_ ):
__snake_case : Optional[Any] = requests.get(url + F"&page={i + 2}" ).json()
jobs.update({job['name']: job['html_url'] for job in result['jobs']} )
return jobs
except Exception as e:
print('Unknown error, could not fetch links.' , UpperCAmelCase_ )
return {}
def __UpperCAmelCase ( UpperCAmelCase_ : str ) -> Any:
'''simple docstring'''
__snake_case : Any = {}
if os.path.exists(UpperCAmelCase_ ):
__snake_case : List[Any] = os.listdir(UpperCAmelCase_ )
for file in files:
try:
with open(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , encoding='utf-8' ) as f:
__snake_case : List[str] = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"Could not open {os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )}." ) from e
return _artifact
def __UpperCAmelCase ( ) -> List[str]:
'''simple docstring'''
class UpperCamelCase :
def __init__(self : str , _A : str) -> Dict:
__snake_case : List[str] = name
__snake_case : str = []
def __str__(self : Dict) -> int:
return self.name
def _lowercase (self : Any , _A : str) -> Optional[int]:
self.paths.append({'name': self.name, 'path': path})
__snake_case : Dict[str, Artifact] = {}
__snake_case : Optional[Any] = filter(os.path.isdir , os.listdir() )
for directory in directories:
__snake_case : Any = directory
if artifact_name not in _available_artifacts:
__snake_case : Optional[int] = Artifact(UpperCAmelCase_ )
_available_artifacts[artifact_name].add_path(UpperCAmelCase_ )
return _available_artifacts
if __name__ == "__main__":
_a : Optional[Any]= get_job_links()
_a : int= retrieve_available_artifacts()
_a : str= collections.OrderedDict(
[
("*.py", "API Examples"),
("*.md", "MD Examples"),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_a : Union[str, Any]= {
v: {
"failed": [],
"failures": {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_a : Tuple= github_actions_job_links.get("run_doctests")
_a : int= available_artifacts["doc_tests_gpu_test_reports"].paths[0]
_a : str= retrieve_artifact(artifact_path["name"])
if "stats" in artifact:
_a, _a, _a : List[Any]= handle_test_results(artifact["stats"])
_a : Optional[Any]= failed
_a : Optional[int]= success
_a : Optional[Any]= time_spent[1:-1] + ", "
_a : str= extract_first_line_failure(artifact["failures_short"])
for line in artifact["summary_short"].split("\n"):
if re.search("FAILED", line):
_a : List[str]= line.replace("FAILED ", "")
_a : Dict= line.split()[0].replace("\n", "")
if "::" in line:
_a, _a : Union[str, Any]= line.split("::")
else:
_a, _a : int= line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_a : List[Any]= docs[file_regex]
doc_test_results[category]["failed"].append(test)
_a : Any= all_failures[test] if test in all_failures else "N/A"
_a : int= failure
break
_a : str= Message("🤗 Results of the doc tests.", doc_test_results)
message.post()
message.post_reply()
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def mod_inverse(a: int, m: int) -> int:
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
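# Quick illustrative checks (not from the original file):
# gcd(3, 11) == 1, so mod_inverse(3, 11) == 4 because 3 * 4 == 12 and 12 % 11 == 1;
# gcd(4, 10) == 2, so mod_inverse(4, 10) raises ValueError.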
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
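# Illustrative example (not from the original file):
# radix_sort([170, 45, 75, 90, 802, 24, 2, 66]) returns [2, 24, 45, 66, 75, 90, 170, 802].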
from datetime import datetime as dt
import os
from github import Github
__UpperCAmelCase = [
'good first issue',
'good second issue',
'good difficult issue',
'feature request',
'new model',
'wip',
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ : Dict = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
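# Illustrative examples (not from the original file): each side is rounded up to a multiple of
# scale_factor**2 before being divided by scale_factor, so get_new_h_w(768, 768) -> (96, 96)
# and get_new_h_w(500, 500) -> (64, 64).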
class UpperCamelCase__ (lowerCAmelCase__ ):
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> Any:
super().__init__()
self.register_modules(
text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , )
lowerCamelCase : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
if latents is None:
lowerCamelCase : Dict = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
else:
if latents.shape != shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowerCamelCase : str = latents.to(UpperCamelCase__ )
lowerCamelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , ) -> Tuple:
lowerCamelCase : int = len(UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else 1
# get prompt text embeddings
lowerCamelCase : Optional[int] = self.tokenizer(
UpperCamelCase__ , padding="max_length" , truncation=UpperCamelCase__ , max_length=77 , return_attention_mask=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="pt" , )
lowerCamelCase : str = text_inputs.input_ids
lowerCamelCase : Dict = self.tokenizer(UpperCamelCase__ , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
lowerCamelCase : int = text_input_ids.to(UpperCamelCase__ )
lowerCamelCase : int = text_inputs.attention_mask.to(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.text_encoder(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
lowerCamelCase : Any = prompt_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : str = text_encoder_hidden_states.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : List[str] = text_mask.repeat_interleave(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase : List[str]
if negative_prompt is None:
lowerCamelCase : Optional[int] = [""] * batch_size
elif type(UpperCamelCase__ ) is not type(UpperCamelCase__ ):
raise TypeError(
F'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase__ )} !='''
F''' {type(UpperCamelCase__ )}.''' )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Any = [negative_prompt]
elif batch_size != len(UpperCamelCase__ ):
raise ValueError(
F'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase__ )}, but `prompt`:'''
F''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
lowerCamelCase : int = negative_prompt
lowerCamelCase : str = self.tokenizer(
UpperCamelCase__ , padding="max_length" , max_length=77 , truncation=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="pt" , )
lowerCamelCase : Optional[int] = uncond_input.input_ids.to(UpperCamelCase__ )
lowerCamelCase : Any = uncond_input.attention_mask.to(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Any = self.text_encoder(
input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowerCamelCase : Tuple = negative_prompt_embeds.shape[1]
lowerCamelCase : str = negative_prompt_embeds.repeat(1 , UpperCamelCase__ )
lowerCamelCase : List[str] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCamelCase__ )
lowerCamelCase : int = uncond_text_encoder_hidden_states.shape[1]
lowerCamelCase : str = uncond_text_encoder_hidden_states.repeat(1 , UpperCamelCase__ , 1 )
lowerCamelCase : List[str] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , UpperCamelCase__ , -1 )
lowerCamelCase : Any = uncond_text_mask.repeat_interleave(UpperCamelCase__ , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase : Optional[int] = torch.cat([negative_prompt_embeds, prompt_embeds] )
lowerCamelCase : Union[str, Any] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
lowerCamelCase : List[Any] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def _lowercase ( self , UpperCamelCase__=0 ) -> Union[str, Any]:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowerCamelCase : Union[str, Any] = torch.device(F'''cuda:{gpu_id}''' )
lowerCamelCase : Any = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__=0 ) -> Dict:
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowerCamelCase : List[Any] = torch.device(F'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=UpperCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowerCamelCase : str = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
lowerCamelCase , lowerCamelCase : Union[str, Any] = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
if self.safety_checker is not None:
lowerCamelCase , lowerCamelCase : int = cpu_offload_with_hook(self.safety_checker , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ )
# We'll offload the last model manually.
lowerCamelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self ) -> List[Any]:
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(UpperCamelCase__ , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(UpperCamelCase__ )
def __call__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = 512 , UpperCamelCase__ = 512 , UpperCamelCase__ = 100 , UpperCamelCase__ = 4.0 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Union[str, Any]:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = 1
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Any = len(UpperCamelCase__ )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase__ )}''' )
lowerCamelCase : Tuple = self._execution_device
lowerCamelCase : int = batch_size * num_images_per_prompt
lowerCamelCase : Dict = guidance_scale > 1.0
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = self._encode_prompt(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : int = torch.cat(UpperCamelCase__ , dim=0 )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = torch.cat(UpperCamelCase__ , dim=0 )
if do_classifier_free_guidance:
lowerCamelCase : List[str] = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Optional[Any] = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 )
lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=UpperCamelCase__ )
self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ )
lowerCamelCase : int = self.scheduler.timesteps
lowerCamelCase : Optional[Any] = self.unet.config.in_channels
lowerCamelCase , lowerCamelCase : List[str] = get_new_h_w(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor )
# create initial latent
lowerCamelCase : List[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , )
for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
lowerCamelCase : Dict = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCamelCase : str = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
lowerCamelCase : List[Any] = self.unet(
sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
if do_classifier_free_guidance:
lowerCamelCase , lowerCamelCase : List[str] = noise_pred.split(latents.shape[1] , dim=1 )
lowerCamelCase , lowerCamelCase : Tuple = noise_pred.chunk(2 )
lowerCamelCase , lowerCamelCase : Tuple = variance_pred.chunk(2 )
lowerCamelCase : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowerCamelCase : Optional[Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowerCamelCase , lowerCamelCase : Any = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase : Optional[int] = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , ).prev_sample
# post-processing
lowerCamelCase : Any = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowerCamelCase : List[str] = image * 0.5 + 0.5
lowerCamelCase : Union[str, Any] = image.clamp(0 , 1 )
lowerCamelCase : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowerCamelCase : Union[str, Any] = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCamelCase__ )
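# Standalone illustration (not part of the pipeline above) of the classifier-free
# guidance update it applies: the final noise estimate moves from the unconditional
# prediction toward the text-conditioned one, scaled by guidance_scale.
import torch

noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.6])
guidance_scale = 4.0
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided)  # tensor([0.9000, 1.8000]); larger scales push further toward the text-conditioned branch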
| 48
|
from typing import Any
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> list:
"""simple docstring"""
_validation(
__a , __a , __a , __a , __a , )
# Creates data structures and fill initial step
lowerCamelCase__: dict ={}
lowerCamelCase__: dict ={}
for state in states_space:
lowerCamelCase__: Optional[Any] =observations_space[0]
lowerCamelCase__: List[Any] =(
initial_probabilities[state] * emission_probabilities[state][observation]
)
lowerCamelCase__: int =None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(__a ) ):
lowerCamelCase__: Tuple =observations_space[o]
lowerCamelCase__: Optional[Any] =observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
lowerCamelCase__: Tuple =""
lowerCamelCase__: Optional[Any] =-1
for k_state in states_space:
lowerCamelCase__: int =(
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
lowerCamelCase__: List[str] =probability
lowerCamelCase__: int =k_state
# Update probabilities and pointers dicts
lowerCamelCase__: Any =(
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
lowerCamelCase__: int =arg_max
# The final observation
lowerCamelCase__: Any =observations_space[len(__a ) - 1]
# argmax for given final observation
lowerCamelCase__: Optional[Any] =""
lowerCamelCase__: int =-1
for k_state in states_space:
lowerCamelCase__: Tuple =probabilities[(k_state, final_observation)]
if probability > max_probability:
lowerCamelCase__: List[Any] =probability
lowerCamelCase__: Dict =k_state
lowerCamelCase__: str =arg_max
# Process pointers backwards
lowerCamelCase__: Union[str, Any] =last_state
lowerCamelCase__: List[str] =[]
for o in range(len(__a ) - 1 , -1 , -1 ):
result.append(__a )
lowerCamelCase__: Union[str, Any] =pointers[previous, observations_space[o]]
result.reverse()
return result
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_not_empty(
__a , __a , __a , __a , __a , )
_validate_lists(__a , __a )
_validate_dicts(
__a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , ) -> None:
"""simple docstring"""
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_list(__a , "observations_space" )
_validate_list(__a , "states_space" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Tuple =F"""{var_name} must be a list"""
raise ValueError(__a )
else:
for x in _object:
if not isinstance(__a , __a ):
lowerCamelCase__: str =F"""{var_name} must be a list of strings"""
raise ValueError(__a )
def lowerCAmelCase_ ( __a , __a , __a , ) -> None:
"""simple docstring"""
_validate_dict(__a , "initial_probabilities" , __a )
_validate_nested_dict(__a , "transition_probabilities" )
_validate_nested_dict(__a , "emission_probabilities" )
def lowerCAmelCase_ ( __a , __a ) -> None:
"""simple docstring"""
_validate_dict(_object , __a , __a )
for x in _object.values():
_validate_dict(__a , __a , __a , __a )
def lowerCAmelCase_ ( __a , __a , __a , __a = False ) -> None:
"""simple docstring"""
if not isinstance(_object , __a ):
lowerCamelCase__: Optional[int] =F"""{var_name} must be a dict"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object ):
lowerCamelCase__: Tuple =F"""{var_name} all keys must be strings"""
raise ValueError(__a )
if not all(isinstance(__a , __a ) for x in _object.values() ):
lowerCamelCase__: Dict ="nested dictionary " if nested else ""
lowerCamelCase__: List[str] =F"""{var_name} {nested_text}all values must be {value_type.__name__}"""
raise ValueError(__a )
if __name__ == "__main__":
from doctest import testmod
testmod()
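# Hedged usage sketch for the decoder above, using the classic healthy/fever HMM.
# The function name `viterbi` is an assumption here (the def above is name-mangled),
# so the call is left as a comment rather than executed.
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
initial_p = {"Healthy": 0.6, "Fever": 0.4}
transition_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emission_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, initial_p, transition_p, emission_p)
# -> ["Healthy", "Healthy", "Fever"]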
| 10
| 0
|
"""simple docstring"""
def binary_exponentiation(a: int, n: int, mod: int) -> int:
    """simple docstring"""
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
# a prime number
p = 701
a = 1000000000
b = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
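# Standalone sanity check (not from the original file): for a prime modulus, Fermat's
# little theorem gives base**(prime - 2) % prime as the modular inverse of base, which
# is exactly what the binary_exponentiation call above computes in O(log p) steps.
prime, base = 701, 10
inverse = pow(base, prime - 2, prime)
assert (base * inverse) % prime == 1
assert inverse == pow(base, -1, prime)  # pow() with exponent -1 needs Python 3.8+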
| 303
|
"""simple docstring"""
import math
BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picks: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picks)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picks)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"
if __name__ == "__main__":
print(solution(20))
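# Quick independent check of the expectation computed above, with the literals spelled
# out (7 colours, 70 balls total, 60 balls outside any one colour, 20 picks); reuses
# the math import at the top of this file.
expected = 7 * (1 - math.comb(60, 20) / math.comb(70, 20))
print(f"{expected:.9f}")  # 6.818741802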
| 303
| 1
|
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class UpperCAmelCase_ ( a):
def __init__( self, __a = "▁", __a = True, __a = "<unk>", __a = "</s>", __a = "<pad>", ):
'''simple docstring'''
_lowerCAmelCase : List[str] = {
"pad": {"id": 0, "token": pad_token},
"eos": {"id": 1, "token": eos_token},
"unk": {"id": 2, "token": unk_token},
}
_lowerCAmelCase : str = [None] * len(self.special_tokens)
for token_dict in self.special_tokens.values():
_lowerCAmelCase : Dict = token_dict["token"]
_lowerCAmelCase : int = Tokenizer(Unigram())
_lowerCAmelCase : Any = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(" {2,}"), " "),
normalizers.Lowercase(),
])
_lowerCAmelCase : Tuple = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=__a, add_prefix_space=__a),
pre_tokenizers.Digits(individual_digits=__a),
pre_tokenizers.Punctuation(),
])
_lowerCAmelCase : List[str] = decoders.Metaspace(replacement=__a, add_prefix_space=__a)
_lowerCAmelCase : Tuple = TemplateProcessing(
single=f"$A {self.special_tokens['eos']['token']}", special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])], )
_lowerCAmelCase : List[Any] = {
"model": "SentencePieceUnigram",
"replacement": replacement,
"add_prefix_space": add_prefix_space,
}
super().__init__(__a, __a)
def snake_case__ ( self, __a, __a = 8000, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : Dict = trainers.UnigramTrainer(
vocab_size=__a, special_tokens=self.special_tokens_list, show_progress=__a, )
if isinstance(__a, __a):
_lowerCAmelCase : Union[str, Any] = [files]
self._tokenizer.train(__a, trainer=__a)
self.add_unk_id()
def snake_case__ ( self, __a, __a = 8000, __a = True, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = trainers.UnigramTrainer(
vocab_size=__a, special_tokens=self.special_tokens_list, show_progress=__a, )
self._tokenizer.train_from_iterator(__a, trainer=__a)
self.add_unk_id()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = json.loads(self._tokenizer.to_str())
_lowerCAmelCase : Tuple = self.special_tokens["unk"]["id"]
_lowerCAmelCase : Any = Tokenizer.from_str(json.dumps(__a))
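# Minimal standalone sketch of the same tokenizers building blocks the wrapper above
# assembles (Unigram model + UnigramTrainer); the toy corpus and vocab size are
# illustrative only, and the imports at the top of this file are reused.
example_tokenizer = Tokenizer(Unigram())
example_trainer = trainers.UnigramTrainer(
    vocab_size=100, special_tokens=["<pad>", "</s>", "<unk>"], show_progress=False
)
example_tokenizer.train_from_iterator(["hello world", "hello tokenizers"], trainer=example_trainer)
print(example_tokenizer.encode("hello world").tokens)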
| 36
|
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase (_a , unittest.TestCase ):
_lowercase = MgpstrTokenizer
_lowercase = False
_lowercase = {}
_lowercase = False
def snake_case_ ( self: int ):
'''simple docstring'''
super().setUp()
# fmt: off
__UpperCamelCase = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
# fmt: on
__UpperCamelCase = dict(zip(A_,range(len(A_ ) ) ) )
__UpperCamelCase = os.path.join(self.tmpdirname,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file,'w',encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def snake_case_ ( self: Dict,**A_: Tuple ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname,**A_ )
def snake_case_ ( self: List[Any],A_: Optional[Any] ):
'''simple docstring'''
__UpperCamelCase = 'tester'
__UpperCamelCase = 'tester'
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def snake_case_ ( self: str ):
'''simple docstring'''
pass
def snake_case_ ( self: List[Any] ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase = '[SPECIAL_TOKEN]'
tokenizer.add_special_tokens({'cls_token': special_token} )
__UpperCamelCase = tokenizer.encode([special_token],add_special_tokens=A_ )
self.assertEqual(len(A_ ),1 )
__UpperCamelCase = tokenizer.decode(A_,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def snake_case_ ( self: Dict ):
'''simple docstring'''
__UpperCamelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
__UpperCamelCase, __UpperCamelCase = self.get_input_output_texts(A_ )
__UpperCamelCase = tokenizer.tokenize(A_ )
__UpperCamelCase = tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase = tokenizer.encode(A_,add_special_tokens=A_ )
self.assertListEqual(A_,A_ )
__UpperCamelCase = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ),0 )
__UpperCamelCase = tokenizer.decode(A_ )
self.assertIsInstance(A_,A_ )
self.assertEqual(text_a.replace(' ','' ),A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def snake_case_ ( self: int ):
'''simple docstring'''
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def snake_case_ ( self: List[str] ):
'''simple docstring'''
pass
| 310
| 0
|
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    '''simple docstring'''
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
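# Standalone spot check of the amicable-number condition used in `solution`:
# 220 and 284 form the smallest amicable pair.
def proper_divisor_sum(n: int) -> int:
    return sum(i for i in range(1, n) if n % i == 0)


assert proper_divisor_sum(220) == 284
assert proper_divisor_sum(284) == 220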
| 157
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_SCREAMING_SNAKE_CASE : str = False
class __a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Any =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase__ : Dict =torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] =pipe.dual_guided(
prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase_ )
UpperCamelCase__ : str =VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : int =generator.manual_seed(0 )
UpperCamelCase__ : str =pipe.dual_guided(
prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str ='''cyberpunk 2077'''
UpperCamelCase__ : str =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase__ : int =torch.manual_seed(0 )
UpperCamelCase__ : int =pipe.dual_guided(
prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase__ : List[str] =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Dict =np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Dict ='''A painting of a squirrel eating a burger '''
UpperCamelCase__ : Optional[int] =torch.manual_seed(0 )
UpperCamelCase__ : str =pipe.text_to_image(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCamelCase__ : str =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : List[Any] =np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Optional[Any] =pipe.image_variation(lowercase_ , generator=lowercase_ , output_type='''numpy''' ).images
UpperCamelCase__ : str =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Tuple =np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 157
| 1
|
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    '''simple docstring'''
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output
__a = HfArgumentParser(PretokenizationArguments)
__a = parser.parse_args()
if args.num_workers is None:
__a = multiprocessing.cpu_count()
__a = AutoTokenizer.from_pretrained(args.tokenizer_dir)
__a = time.time()
__a = load_dataset(args.dataset_name, split='train')
print(f"Dataset loaded in {time.time()-t_start:.2f}s")
__a = time.time()
__a = ds.map(
tokenize,
num_proc=args.num_workers,
remove_columns=[
'repo_name',
'path',
'copies',
'size',
'content',
'license',
'hash',
'line_mean',
'line_max',
'alpha_frac',
'autogenerated',
],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")
__a = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
| 30
|
from __future__ import annotations

from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """simple docstring"""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(
    forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1
) -> bool:
    """simple docstring"""
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 1_5600], [0, -1_2400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 170
| 0
|
import math
def jump_search(arr: list, x: int) -> int:
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
A_ :List[str] = input('''Enter numbers separated by a comma:\n''').strip()
A_ :Dict = [int(item) for item in user_input.split(''',''')]
A_ :Optional[Any] = int(input('''Enter the number to be searched:\n'''))
A_ :List[Any] = jump_search(arr, x)
if res == -1:
print('''Number not found!''')
else:
print(f"Number {x} is at index {res}")
| 245
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def A ( a_ ) -> int:
__UpperCamelCase : List[Any] =botoa.client('iam' )
__UpperCamelCase : List[str] ={
'Version': '2012-10-17',
'Statement': [
{'Effect': 'Allow', 'Principal': {'Service': 'sagemaker.amazonaws.com'}, 'Action': 'sts:AssumeRole'}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=a_ ,AssumeRolePolicyDocument=json.dumps(a_ ,indent=2 ) )
__UpperCamelCase : List[str] ={
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=a_ ,PolicyName=F'{role_name}_policy_permission' ,PolicyDocument=json.dumps(a_ ,indent=2 ) ,)
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'role {role_name} already exists. Using existing one' )
def A ( a_ ) -> Optional[Any]:
__UpperCamelCase : List[Any] =botoa.client('iam' )
return iam_client.get_role(RoleName=a_ )["Role"]["Arn"]
def A ( ) -> Tuple:
__UpperCamelCase : Any =_ask_options(
'How do you want to authorize?' ,['AWS Profile', 'Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '] ,a_ ,)
__UpperCamelCase : str =None
if credentials_configuration == 0:
__UpperCamelCase : str =_ask_field('Enter your AWS Profile name: [default] ' ,default='default' )
__UpperCamelCase : Optional[Any] =aws_profile
else:
print(
'Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,'
'`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`' )
__UpperCamelCase : int =_ask_field('AWS Access Key ID: ' )
__UpperCamelCase : Dict =aws_access_key_id
__UpperCamelCase : Any =_ask_field('AWS Secret Access Key: ' )
__UpperCamelCase : Optional[Any] =aws_secret_access_key
__UpperCamelCase : Tuple =_ask_field('Enter your AWS Region: [us-east-1]' ,default='us-east-1' )
__UpperCamelCase : List[str] =aws_region
__UpperCamelCase : Any =_ask_options(
'Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?' ,['Provide IAM Role name', 'Create new IAM role using credentials'] ,a_ ,)
if role_management == 0:
__UpperCamelCase : Optional[Any] =_ask_field('Enter your IAM role name: ' )
else:
__UpperCamelCase : Dict ='accelerate_sagemaker_execution_role'
print(F'Accelerate will create an iam role "{iam_role_name}" using the provided credentials' )
_create_iam_role_for_sagemaker(a_ )
__UpperCamelCase : List[Any] =_ask_field(
'Do you want to use custom Docker image? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : int =None
if is_custom_docker_image:
__UpperCamelCase : List[Any] =_ask_field('Enter your Docker image: ' ,lambda a_ : str(a_ ).lower() )
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want to provide SageMaker input channels with data locations? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Optional[Any] =None
if is_sagemaker_inputs_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : str =_ask_field(
'Do you want to enable SageMaker metrics? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Dict =None
if is_sagemaker_metrics_enabled:
__UpperCamelCase : Optional[Any] =_ask_field(
'Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ' ,lambda a_ : str(a_ ).lower() ,)
__UpperCamelCase : int =_ask_options(
'What is the distributed mode?' ,['No distributed training', 'Data parallelism'] ,_convert_sagemaker_distributed_mode ,)
__UpperCamelCase : int ={}
__UpperCamelCase : str =_ask_field(
'Do you wish to optimize your script with torch dynamo?[yes/NO]:' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_dynamo:
__UpperCamelCase : Dict ='dynamo_'
__UpperCamelCase : Optional[int] =_ask_options(
'Which dynamo backend would you like to use?' ,[x.lower() for x in DYNAMO_BACKENDS] ,_convert_dynamo_backend ,default=2 ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to customize the defaults sent to torch.compile? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
if use_custom_options:
__UpperCamelCase : List[str] =_ask_options(
'Which mode do you want to use?' ,a_ ,lambda a_ : TORCH_DYNAMO_MODES[int(a_ )] ,default='default' ,)
__UpperCamelCase : Union[str, Any] =_ask_field(
'Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple =_ask_field(
'Do you want to enable dynamic shape tracing? [yes/NO]: ' ,_convert_yes_no_to_bool ,default=a_ ,error_message='Please enter yes or no.' ,)
__UpperCamelCase : Tuple ='Which EC2 instance type you want to use for your training?'
if distributed_type != SageMakerDistributedType.NO:
__UpperCamelCase : int =_ask_options(
a_ ,a_ ,lambda a_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(a_ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
__UpperCamelCase : List[str] =_ask_field(a_ ,lambda a_ : str(a_ ).lower() ,default='ml.p3.2xlarge' )
__UpperCamelCase : Union[str, Any] =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
__UpperCamelCase : List[str] =_ask_field(
'How many machines do you want use? [1]: ' ,a_ ,default=1 ,)
__UpperCamelCase : Optional[Any] =_ask_options(
'Do you wish to use FP16 or BF16 (mixed precision)?' ,['no', 'fp16', 'bf16', 'fp8'] ,_convert_mixed_precision ,)
if use_dynamo and mixed_precision == "no":
print(
'Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.' )
return SageMakerConfig(
image_uri=a_ ,compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER ,distributed_type=a_ ,use_cpu=a_ ,dynamo_config=a_ ,eca_instance_type=a_ ,profile=a_ ,region=a_ ,iam_role_name=a_ ,mixed_precision=a_ ,num_machines=a_ ,sagemaker_inputs_file=a_ ,sagemaker_metrics_file=a_ ,)
| 245
| 1
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def __lowerCamelCase ( A__ , A__ , A__ ) -> Dict:
"""simple docstring"""
UpperCamelCase = ('dense.weight', 'attention.self.query', 'attention.self.key', 'attention.self.value')
UpperCamelCase = (
('layer.', 'layer_'),
('word_embeddings.weight', 'word_embeddings'),
('position_embeddings.weight', 'position_embeddings'),
('token_type_embeddings.weight', 'token_type_embeddings'),
('.', '/'),
('LayerNorm/weight', 'LayerNorm/gamma'),
('LayerNorm/bias', 'LayerNorm/beta'),
('weight', 'kernel'),
)
if not os.path.isdir(A__ ):
os.makedirs(A__ )
UpperCamelCase = model.state_dict()
def to_tf_var_name(A__ ):
for patt, repl in iter(A__ ):
UpperCamelCase = name.replace(A__ , A__ )
return F"""bert/{name}"""
def create_tf_var(A__ , A__ , A__ ):
UpperCamelCase = tf.dtypes.as_dtype(tensor.dtype )
UpperCamelCase = tf.get_variable(dtype=A__ , shape=tensor.shape , name=A__ , initializer=tf.zeros_initializer() )
session.run(tf.variables_initializer([tf_var] ) )
session.run(A__ )
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
UpperCamelCase = to_tf_var_name(A__ )
UpperCamelCase = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose ):
UpperCamelCase = torch_tensor.T
UpperCamelCase = create_tf_var(tensor=A__ , name=A__ , session=A__ )
tf.keras.backend.set_value(A__ , A__ )
UpperCamelCase = session.run(A__ )
print(F"""Successfully created {tf_name}: {np.allclose(A__ , A__ )}""" )
UpperCamelCase = tf.train.Saver(tf.trainable_variables() )
saver.save(A__ , os.path.join(A__ , model_name.replace('-' , '_' ) + '.ckpt' ) )
def __lowerCamelCase ( A__=None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = argparse.ArgumentParser()
parser.add_argument('--model_name' , type=A__ , required=A__ , help='model name e.g. bert-base-uncased' )
parser.add_argument(
'--cache_dir' , type=A__ , default=A__ , required=A__ , help='Directory containing pytorch model' )
parser.add_argument('--pytorch_model_path' , type=A__ , required=A__ , help='/path/to/<pytorch-model-name>.bin' )
parser.add_argument('--tf_cache_dir' , type=A__ , required=A__ , help='Directory in which to save tensorflow model' )
UpperCamelCase = parser.parse_args(A__ )
UpperCamelCase = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path ) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=A__ , ckpt_dir=args.tf_cache_dir , model_name=args.model_name )
if __name__ == "__main__":
main()
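# Standalone illustration of the renaming performed by to_tf_var_name() above: the same
# (pattern, replacement) pairs map a PyTorch parameter name onto its TensorFlow
# checkpoint counterpart.
example_patterns = (
    ("layer.", "layer_"),
    ("word_embeddings.weight", "word_embeddings"),
    ("position_embeddings.weight", "position_embeddings"),
    ("token_type_embeddings.weight", "token_type_embeddings"),
    (".", "/"),
    ("LayerNorm/weight", "LayerNorm/gamma"),
    ("LayerNorm/bias", "LayerNorm/beta"),
    ("weight", "kernel"),
)
example_name = "encoder.layer.0.attention.self.query.weight"
for patt, repl in example_patterns:
    example_name = example_name.replace(patt, repl)
print(f"bert/{example_name}")  # bert/encoder/layer_0/attention/self/query/kernel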
| 28
|
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0000
SMALL_TEST = 5000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def __lowerCamelCase ( A__ , A__ ) -> Any:
"""simple docstring"""
for i in range(A__ ):
UpperCamelCase = dataset[i]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ ) -> int:
"""simple docstring"""
for i in range(0 , len(A__ ) , A__ ):
UpperCamelCase = dataset[i : i + batch_size]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ ) -> List[Any]:
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(A__ ):
UpperCamelCase = dataset[i]
@get_duration
def __lowerCamelCase ( A__ , A__ , A__ , A__ ) -> int:
"""simple docstring"""
with dataset.formatted_as(type=A__ ):
for i in range(0 , A__ , A__ ):
UpperCamelCase = dataset[i : i + batch_size]
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
UpperCamelCase = {'num examples': SPEED_TEST_N_EXAMPLES}
UpperCamelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted, {'type': 'pandas', 'length': SMALL_TEST}),
(read_formatted, {'type': 'torch', 'length': SMALL_TEST}),
(read_formatted, {'type': 'tensorflow', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
UpperCamelCase = [
(read, {'length': SMALL_TEST}),
(read, {'length': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 10}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 100}),
(read_batch, {'length': SPEED_TEST_N_EXAMPLES, 'batch_size': 1_000}),
(read_formatted, {'type': 'numpy', 'length': SMALL_TEST}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 10}),
(read_formatted_batch, {'type': 'numpy', 'length': SMALL_TEST, 'batch_size': 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('generating dataset' )
UpperCamelCase = datasets.Features(
{'list': datasets.Sequence(datasets.Value('float32' ) ), 'numbers': datasets.Value('float32' )} )
UpperCamelCase = generate_example_dataset(
os.path.join(A__ , 'dataset.arrow' ) , A__ , num_examples=A__ , seq_shapes={'list': (100,)} , )
print('first set of iterations' )
for func, kwargs in functions:
print(func.__name__ , str(A__ ) )
UpperCamelCase = func(A__ , **A__ )
print('shuffling dataset' )
UpperCamelCase = dataset.shuffle()
print('Second set of iterations (after shuffling)' )
for func, kwargs in functions_shuffled:
print('shuffled ' , func.__name__ , str(A__ ) )
UpperCamelCase = func(
A__ , **A__ )
with open(A__ , 'wb' ) as f:
f.write(json.dumps(A__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 28
| 1
|
"""simple docstring"""
def logical_left_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
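# Quick standalone comparison with Python's built-in shift operators, which the string
# manipulations above mimic for non-negative inputs:
assert bin(0b1101 << 2) == "0b110100"  # left shift
assert bin(0b1101 >> 2) == "0b11"      # right shift of a non-negative number
# For negatives, Python's >> is an arithmetic shift on unbounded ints (e.g. -5 >> 1 == -3),
# whereas arithmetic_right_shift above works on a fixed-width two's-complement string.
assert -5 >> 1 == -3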
| 362
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=A__ )
class __a ( A__ ):
_lowerCAmelCase : str = field(default='''language-modeling''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
_lowerCAmelCase : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
_lowerCAmelCase : ClassVar[Features] = Features({} )
_lowerCAmelCase : str = "text"
@property
def __lowercase ( self : str ):
'''simple docstring'''
return {self.text_column: "text"}
| 196
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""ctrl""": """https://huggingface.co/ctrl/resolve/main/config.json"""}
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''ctrl'''
lowerCAmelCase_ = ['''past_key_values''']
lowerCAmelCase_ = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[Any] , _A : int=24_6534 , _A : str=256 , _A : List[Any]=1280 , _A : Tuple=8192 , _A : List[str]=48 , _A : List[str]=16 , _A : str=0.1 , _A : Any=0.1 , _A : str=1e-6 , _A : str=0.02 , _A : Union[str, Any]=True , **_A : str , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[Any] = n_positions
__SCREAMING_SNAKE_CASE : Dict = n_embd
__SCREAMING_SNAKE_CASE : List[Any] = n_layer
__SCREAMING_SNAKE_CASE : Tuple = n_head
__SCREAMING_SNAKE_CASE : str = dff
__SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
__SCREAMING_SNAKE_CASE : Optional[int] = embd_pdrop
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
super().__init__(**_A )
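# Hedged usage sketch, assuming this is the CTRLConfig shipped with transformers: the
# attribute_map above lets the generic attribute names resolve to the CTRL-specific ones.
from transformers import CTRLConfig

example_config = CTRLConfig(n_embd=1280, n_layer=48)
assert example_config.hidden_size == 1280
assert example_config.num_hidden_layers == 48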
| 303
|
from heapq import heappop, heappush
import numpy as np
def a__ ( snake_case , snake_case , snake_case , snake_case , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : int = grid.shape
__SCREAMING_SNAKE_CASE : Tuple = [-1, 1, 0, 0]
__SCREAMING_SNAKE_CASE : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = [(0, source)], set()
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.full((rows, cols) , np.inf )
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.empty((rows, cols) , dtype=snake_case )
__SCREAMING_SNAKE_CASE : List[Any] = None
while queue:
((__SCREAMING_SNAKE_CASE), (__SCREAMING_SNAKE_CASE)) : Any = heappop(snake_case )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
__SCREAMING_SNAKE_CASE : int = []
while (x, y) != source:
path.append((x, y) )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = predecessors[x, y]
path.append(snake_case ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(snake_case ) ):
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Tuple = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
__SCREAMING_SNAKE_CASE : Optional[int] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(snake_case , (dist + 1, (nx, ny)) )
__SCREAMING_SNAKE_CASE : int = dist + 1
__SCREAMING_SNAKE_CASE : Dict = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
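# Small standalone reminder of the heapq invariant the search above relies on: queue
# entries are (distance, cell) tuples, so the smallest tentative distance pops first.
example_frontier = []
heappush(example_frontier, (3, (0, 1)))
heappush(example_frontier, (1, (2, 2)))
heappush(example_frontier, (2, (1, 0)))
assert heappop(example_frontier) == (1, (2, 2))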
| 303
| 1
|
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__a = logging.getLogger(__name__)
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = np.argmax(lowerCAmelCase__ , axis=1 )
return np.sum(outputs == labels )
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
with open(lowerCAmelCase__ , encoding='''utf_8''' ) as f:
UpperCAmelCase_ : Union[str, Any] = csv.reader(lowerCAmelCase__ )
UpperCAmelCase_ : Any = []
next(lowerCAmelCase__ ) # skip the first line
for line in tqdm(lowerCAmelCase__ ):
output.append((''' '''.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = []
for dataset in encoded_datasets:
UpperCAmelCase_ : List[str] = len(lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
UpperCAmelCase_ : Optional[Any] = np.zeros((n_batch, 2) , dtype=np.intaa )
UpperCAmelCase_ : Optional[Any] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa )
UpperCAmelCase_ : Dict = np.zeros((n_batch,) , dtype=np.intaa )
for (
i,
(story, conta, conta, mc_label),
) in enumerate(lowerCAmelCase__ ):
UpperCAmelCase_ : Dict = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
UpperCAmelCase_ : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
UpperCAmelCase_ : Tuple = with_conta
UpperCAmelCase_ : str = with_conta
UpperCAmelCase_ : Dict = len(lowerCAmelCase__ ) - 1
UpperCAmelCase_ : Tuple = len(lowerCAmelCase__ ) - 1
UpperCAmelCase_ : Tuple = with_conta
UpperCAmelCase_ : Optional[Any] = with_conta
UpperCAmelCase_ : Dict = mc_label
UpperCAmelCase_ : Tuple = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(lowerCAmelCase__ ) for t in all_inputs ) )
return tensor_datasets
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase__ , default='''openai-gpt''' , help='''pretrained model name''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_eval''' , action='''store_true''' , help='''Whether to run eval on the dev set.''' )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument('''--train_dataset''' , type=lowerCAmelCase__ , default='''''' )
parser.add_argument('''--eval_dataset''' , type=lowerCAmelCase__ , default='''''' )
parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=42 )
parser.add_argument('''--num_train_epochs''' , type=lowerCAmelCase__ , default=3 )
parser.add_argument('''--train_batch_size''' , type=lowerCAmelCase__ , default=8 )
parser.add_argument('''--eval_batch_size''' , type=lowerCAmelCase__ , default=16 )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=lowerCAmelCase__ , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--max_grad_norm''' , type=lowerCAmelCase__ , default=1 )
parser.add_argument(
'''--max_steps''' , default=-1 , type=lowerCAmelCase__ , help=(
'''If > 0: set total number of training steps to perform. Override num_train_epochs.'''
) , )
parser.add_argument(
'''--gradient_accumulation_steps''' , type=lowerCAmelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--learning_rate''' , type=lowerCAmelCase__ , default=6.2_5E-5 )
parser.add_argument('''--warmup_steps''' , default=0 , type=lowerCAmelCase__ , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--lr_schedule''' , type=lowerCAmelCase__ , default='''warmup_linear''' )
parser.add_argument('''--weight_decay''' , type=lowerCAmelCase__ , default=0.01 )
parser.add_argument('''--lm_coef''' , type=lowerCAmelCase__ , default=0.9 )
parser.add_argument('''--n_valid''' , type=lowerCAmelCase__ , default=374 )
parser.add_argument('''--server_ip''' , type=lowerCAmelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
parser.add_argument('''--server_port''' , type=lowerCAmelCase__ , default='''''' , help='''Can be used for distant debugging.''' )
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
print(lowerCAmelCase__ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('''Waiting for debugger attach''' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase__ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
UpperCAmelCase_ : Optional[Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
UpperCAmelCase_ : Tuple = torch.cuda.device_count()
logger.info('''device: {}, n_gpu {}'''.format(lowerCAmelCase__ , lowerCAmelCase__ ) )
if not args.do_train and not args.do_eval:
raise ValueError('''At least one of `do_train` or `do_eval` must be True.''' )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
UpperCAmelCase_ : List[Any] = ["""_start_""", """_delimiter_""", """_classify_"""]
UpperCAmelCase_ : Dict = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCAmelCase__ ) )
model.to(lowerCAmelCase__ )
# Load and encode the datasets
def tokenize_and_encode(_lowercase ):
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase__ ) )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return obj
return [tokenize_and_encode(lowerCAmelCase__ ) for o in obj]
logger.info('''Encoding dataset...''' )
UpperCAmelCase_ : Any = load_rocstories_dataset(args.train_dataset )
UpperCAmelCase_ : Optional[Any] = load_rocstories_dataset(args.eval_dataset )
UpperCAmelCase_ : Optional[int] = (train_dataset, eval_dataset)
UpperCAmelCase_ : str = tokenize_and_encode(lowerCAmelCase__ )
# Compute the max input length for the Transformer
UpperCAmelCase_ : Optional[Any] = model.config.n_positions // 2 - 2
UpperCAmelCase_ : Dict = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
UpperCAmelCase_ : Any = min(lowerCAmelCase__ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
UpperCAmelCase_ : Any = pre_process_datasets(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , *lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = tensor_datasets[0], tensor_datasets[1]
UpperCAmelCase_ : Optional[Any] = TensorDataset(*lowerCAmelCase__ )
UpperCAmelCase_ : Optional[Any] = RandomSampler(lowerCAmelCase__ )
UpperCAmelCase_ : int = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ , batch_size=args.train_batch_size )
UpperCAmelCase_ : Optional[int] = TensorDataset(*lowerCAmelCase__ )
UpperCAmelCase_ : Union[str, Any] = SequentialSampler(lowerCAmelCase__ )
UpperCAmelCase_ : Any = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
UpperCAmelCase_ : Dict = args.max_steps
UpperCAmelCase_ : str = args.max_steps // (len(lowerCAmelCase__ ) // args.gradient_accumulation_steps) + 1
else:
UpperCAmelCase_ : Optional[int] = len(lowerCAmelCase__ ) // args.gradient_accumulation_steps * args.num_train_epochs
UpperCAmelCase_ : Union[str, Any] = list(model.named_parameters() )
UpperCAmelCase_ : Dict = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
UpperCAmelCase_ : List[str] = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], """weight_decay""": 0.0},
]
UpperCAmelCase_ : Union[str, Any] = AdamW(lowerCAmelCase__ , lr=args.learning_rate , eps=args.adam_epsilon )
UpperCAmelCase_ : Dict = get_linear_schedule_with_warmup(
lowerCAmelCase__ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase__ )
if args.do_train:
UpperCAmelCase_ : List[str] = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc='''Epoch''' ):
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Any = 0
UpperCAmelCase_ : Optional[int] = tqdm(lowerCAmelCase__ , desc='''Training''' )
for step, batch in enumerate(lowerCAmelCase__ ):
UpperCAmelCase_ : Optional[Any] = tuple(t.to(lowerCAmelCase__ ) for t in batch )
UpperCAmelCase_ : str = batch
UpperCAmelCase_ : List[str] = model(lowerCAmelCase__ , mc_token_ids=lowerCAmelCase__ , lm_labels=lowerCAmelCase__ , mc_labels=lowerCAmelCase__ )
UpperCAmelCase_ : List[str] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
UpperCAmelCase_ : Tuple = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
UpperCAmelCase_ : Optional[Any] = """Training loss: {:.2e} lr: {:.2e}""".format(lowerCAmelCase__ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
UpperCAmelCase_ : Dict = model.module if hasattr(lowerCAmelCase__ , '''module''' ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
UpperCAmelCase_ : Any = os.path.join(args.output_dir , lowerCAmelCase__ )
UpperCAmelCase_ : Optional[int] = os.path.join(args.output_dir , lowerCAmelCase__ )
torch.save(model_to_save.state_dict() , lowerCAmelCase__ )
model_to_save.config.to_json_file(lowerCAmelCase__ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
UpperCAmelCase_ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
UpperCAmelCase_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCAmelCase__ )
if args.do_eval:
model.eval()
UpperCAmelCase_ : Union[str, Any] = 0, 0
UpperCAmelCase_ : int = 0, 0
for batch in tqdm(lowerCAmelCase__ , desc='''Evaluating''' ):
UpperCAmelCase_ : str = tuple(t.to(lowerCAmelCase__ ) for t in batch )
UpperCAmelCase_ : Dict = batch
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(
lowerCAmelCase__ , mc_token_ids=lowerCAmelCase__ , lm_labels=lowerCAmelCase__ , mc_labels=lowerCAmelCase__ )
UpperCAmelCase_ : str = mc_logits.detach().cpu().numpy()
UpperCAmelCase_ : str = mc_labels.to('''cpu''' ).numpy()
UpperCAmelCase_ : Dict = accuracy(lowerCAmelCase__ , lowerCAmelCase__ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
UpperCAmelCase_ : Optional[Any] = eval_loss / nb_eval_steps
UpperCAmelCase_ : Any = eval_accuracy / nb_eval_examples
UpperCAmelCase_ : Any = tr_loss / nb_tr_steps if args.do_train else None
UpperCAmelCase_ : Optional[int] = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
UpperCAmelCase_ : List[str] = os.path.join(args.output_dir , '''eval_results.txt''' )
with open(lowerCAmelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key in sorted(result.keys() ):
logger.info(''' %s = %s''' , lowerCAmelCase__ , str(result[key] ) )
writer.write('''%s = %s\n''' % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 366
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {
'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = ['BloomTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST',
'BloomForCausalLM',
'BloomModel',
'BloomPreTrainedModel',
'BloomForSequenceClassification',
'BloomForTokenClassification',
'BloomForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 235
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCamelCase ( ) -> Union[str, Any]:
__UpperCAmelCase : Any = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
__UpperCAmelCase : Optional[Any] = Image.open(requests.get(snake_case__, stream=snake_case__ ).raw ).convert("RGB" )
return image
def _UpperCamelCase ( snake_case__ ) -> List[str]:
__UpperCAmelCase : List[str] = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.weight''', f'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm1.bias''', f'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.weight''', f'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.norm2.bias''', f'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.qkv.weight''', f'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.weight''', f'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((f'''visual_encoder.blocks.{i}.attn.proj.bias''', f'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc1.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.weight''', f'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((f'''visual_encoder.blocks.{i}.mlp.fc2.bias''', f'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.embeddings.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.embeddings.layernorm.bias") )
# fmt: on
return rename_keys
def _UpperCamelCase ( snake_case__, snake_case__, snake_case__ ) -> Union[str, Any]:
__UpperCAmelCase : Optional[int] = dct.pop(snake_case__ )
__UpperCAmelCase : Dict = val
def _UpperCamelCase ( snake_case__, snake_case__ ) -> Optional[int]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__UpperCAmelCase : List[str] = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.q_bias''' )
__UpperCAmelCase : str = state_dict.pop(f'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
__UpperCAmelCase : Dict = torch.cat((q_bias, torch.zeros_like(snake_case__, requires_grad=snake_case__ ), v_bias) )
__UpperCAmelCase : Any = qkv_bias
def _UpperCamelCase ( snake_case__ ) -> str:
__UpperCAmelCase : Union[str, Any] = 364 if "coco" in model_name else 224
__UpperCAmelCase : Union[str, Any] = InstructBlipVisionConfig(image_size=snake_case__ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
__UpperCAmelCase : int = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__UpperCAmelCase : Optional[Any] = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
__UpperCAmelCase : Optional[Any] = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=3_2001 ).to_dict()
elif "vicuna-13b" in model_name:
__UpperCAmelCase : Optional[int] = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=3_2001 ).to_dict()
else:
raise ValueError("Model name not supported" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
__UpperCAmelCase : List[Any] = InstructBlipQFormerConfig(vocab_size=3_0523 ).to_dict()
__UpperCAmelCase : List[Any] = InstructBlipConfig(vision_config=snake_case__, text_config=snake_case__, qformer_config=snake_case__ )
return config, image_size
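# main conversion routine: load the original LAVIS checkpoint, remap its weights into the HF model, verify logits and generation, then optionally save and push to the hub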
@torch.no_grad()
def _UpperCamelCase ( snake_case__, snake_case__=None, snake_case__=False ) -> Tuple:
__UpperCAmelCase : str = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left" )
qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"} )
if "t5" in model_name:
__UpperCAmelCase : Tuple = TaTokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
__UpperCAmelCase : str = LlamaTokenizerFast.from_pretrained(
"huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>" )
tokenizer.add_special_tokens({"pad_token": "[PAD]"} )
__UpperCAmelCase , __UpperCAmelCase : List[str] = get_blipa_config(snake_case__ )
__UpperCAmelCase : Any = InstructBlipForConditionalGeneration(snake_case__ ).eval()
__UpperCAmelCase : Optional[int] = {
"instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
"instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
"instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
"instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
}
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = model_name_to_original[model_name]
# load original model
print("Loading original model..." )
__UpperCAmelCase : Any = "cuda:1" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase : Optional[int] = "cuda:2" if torch.cuda.is_available() else "cpu"
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = load_model_and_preprocess(
name=snake_case__, model_type=snake_case__, is_eval=snake_case__, device=snake_case__ )
original_model.eval()
print("Done!" )
# update state dict keys
__UpperCAmelCase : Tuple = original_model.state_dict()
__UpperCAmelCase : Optional[Any] = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__, snake_case__, snake_case__ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__UpperCAmelCase : Optional[int] = state_dict.pop(snake_case__ )
if key.startswith("Qformer.bert" ):
__UpperCAmelCase : Tuple = key.replace("Qformer.bert", "qformer" )
if "attention.self" in key:
__UpperCAmelCase : List[Any] = key.replace("self", "attention" )
if "llm_proj" in key:
__UpperCAmelCase : Optional[Any] = key.replace("llm_proj", "language_projection" )
if "t5_proj" in key:
__UpperCAmelCase : Optional[Any] = key.replace("t5_proj", "language_projection" )
if key.startswith("llm_model" ):
__UpperCAmelCase : List[Any] = key.replace("llm_model", "language_model" )
if key.startswith("t5" ):
__UpperCAmelCase : Union[str, Any] = key.replace("t5", "language" )
__UpperCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(snake_case__, snake_case__ )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(snake_case__, strict=snake_case__ )
__UpperCAmelCase : Dict = load_demo_image()
__UpperCAmelCase : Dict = "What is unusual about this image?"
# create processor
__UpperCAmelCase : Dict = BlipImageProcessor(
size={"height": image_size, "width": image_size}, image_mean=snake_case__, image_std=snake_case__ )
__UpperCAmelCase : Any = InstructBlipProcessor(
image_processor=snake_case__, tokenizer=snake_case__, qformer_tokenizer=snake_case__, )
__UpperCAmelCase : Union[str, Any] = processor(images=snake_case__, text=snake_case__, return_tensors="pt" ).to(snake_case__ )
# make sure processor creates exact same pixel values
__UpperCAmelCase : str = vis_processors["eval"](snake_case__ ).unsqueeze(0 ).to(snake_case__ )
__UpperCAmelCase : int = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ), snake_case__ )
original_model.to(snake_case__ )
hf_model.to(snake_case__ )
with torch.no_grad():
if "vicuna" in model_name:
__UpperCAmelCase : Optional[int] = original_model({"image": original_pixel_values, "text_input": [prompt]} ).logits
__UpperCAmelCase : Union[str, Any] = hf_model(**snake_case__ ).logits
else:
__UpperCAmelCase : int = original_model(
{"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]} ).logits
__UpperCAmelCase : str = tokenizer("\n", return_tensors="pt" ).input_ids.to(snake_case__ )
__UpperCAmelCase : Dict = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100 )
__UpperCAmelCase : Dict = hf_model(**snake_case__, labels=snake_case__ ).logits
print("First values of original logits:", original_logits[0, :3, :3] )
print("First values of HF logits:", logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
__UpperCAmelCase : int = 1e-4 if "vicuna" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ), snake_case__, atol=snake_case__ )
print("Looks ok!" )
print("Generating with original model..." )
__UpperCAmelCase : str = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("Generating with HF model..." )
__UpperCAmelCase : List[str] = hf_model.generate(
**snake_case__, do_sample=snake_case__, num_beams=5, max_length=256, min_length=1, top_p=0.9, repetition_penalty=1.5, length_penalty=1.0, temperature=1, )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
__UpperCAmelCase : List[Any] = 2
print("Original generation:", snake_case__ )
__UpperCAmelCase : int = processor.batch_decode(snake_case__, skip_special_tokens=snake_case__ )
__UpperCAmelCase : List[Any] = [text.strip() for text in output_text]
print("HF generation:", snake_case__ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if push_to_hub:
processor.push_to_hub(f'''Salesforce/{model_name}''' )
hf_model.push_to_hub(f'''Salesforce/{model_name}''' )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
_snake_case = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
        help='''Name of the model to convert (one of the supported InstructBLIP checkpoints)''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
_snake_case = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 157
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
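# processor that bundles a BLIP image processor and a tokenizer behind a single __call__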
class _snake_case ( _lowercase ):
lowerCamelCase__: Tuple = ["image_processor", "tokenizer"]
lowerCamelCase__: Tuple = "BlipImageProcessor"
lowerCamelCase__: str = "AutoTokenizer"
def __init__( self: Optional[Any] , __lowerCamelCase: str , __lowerCamelCase: int ) -> List[str]:
__UpperCAmelCase : Dict = False
super().__init__(__lowerCamelCase , __lowerCamelCase )
__UpperCAmelCase : List[Any] = self.image_processor
def __call__( self: Tuple , __lowerCamelCase: ImageInput = None , __lowerCamelCase: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __lowerCamelCase: bool = True , __lowerCamelCase: Union[bool, str, PaddingStrategy] = False , __lowerCamelCase: Union[bool, str, TruncationStrategy] = None , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: int = 0 , __lowerCamelCase: Optional[int] = None , __lowerCamelCase: Optional[bool] = None , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = False , __lowerCamelCase: bool = True , __lowerCamelCase: Optional[Union[str, TensorType]] = None , **__lowerCamelCase: Optional[int] , ) -> BatchEncoding:
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
__UpperCAmelCase : int = self.tokenizer
__UpperCAmelCase : Optional[int] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
return text_encoding
# add pixel_values
__UpperCAmelCase : List[str] = self.image_processor(__lowerCamelCase , return_tensors=__lowerCamelCase )
if text is not None:
__UpperCAmelCase : Optional[int] = self.tokenizer(
text=__lowerCamelCase , add_special_tokens=__lowerCamelCase , padding=__lowerCamelCase , truncation=__lowerCamelCase , max_length=__lowerCamelCase , stride=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , return_overflowing_tokens=__lowerCamelCase , return_special_tokens_mask=__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , return_token_type_ids=__lowerCamelCase , return_length=__lowerCamelCase , verbose=__lowerCamelCase , return_tensors=__lowerCamelCase , **__lowerCamelCase , )
else:
__UpperCAmelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(__lowerCamelCase )
return encoding_image_processor
def _lowerCamelCase ( self: Union[str, Any] , *__lowerCamelCase: Union[str, Any] , **__lowerCamelCase: List[Any] ) -> Dict:
return self.tokenizer.batch_decode(*__lowerCamelCase , **__lowerCamelCase )
def _lowerCamelCase ( self: Any , *__lowerCamelCase: str , **__lowerCamelCase: Dict ) -> Union[str, Any]:
return self.tokenizer.decode(*__lowerCamelCase , **__lowerCamelCase )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _lowerCamelCase ( self: List[Any] ) -> List[str]:
__UpperCAmelCase : int = self.tokenizer.model_input_names
__UpperCAmelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 157
| 1
|
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
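# fast tests for the StableUnCLIP text-to-image pipeline, built from tiny randomly initialised prior/decoder components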
class A_ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__snake_case = StableUnCLIPPipeline
__snake_case = TEXT_TO_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
__snake_case = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__snake_case = False
def _snake_case ( self: Union[str, Any] ) -> Tuple:
__lowerCamelCase : List[str] = 32
__lowerCamelCase : Union[str, Any] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
__lowerCamelCase : int = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__lowerCamelCase : Dict = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=a , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__lowerCamelCase : str = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=a , num_layers=1 , )
torch.manual_seed(0 )
__lowerCamelCase : Union[str, Any] = DDPMScheduler(
variance_type='fixed_small_log' , prediction_type='sample' , num_train_timesteps=1000 , clip_sample=a , clip_sample_range=5.0 , beta_schedule='squaredcos_cap_v2' , )
# regular denoising components
torch.manual_seed(0 )
__lowerCamelCase : List[Any] = StableUnCLIPImageNormalizer(embedding_dim=a )
__lowerCamelCase : Optional[int] = DDPMScheduler(beta_schedule='squaredcos_cap_v2' )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
torch.manual_seed(0 )
__lowerCamelCase : List[str] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=a , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
__lowerCamelCase : Optional[int] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock2D', 'DownBlock2D') , up_block_types=('UpBlock2D', 'CrossAttnUpBlock2D') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='projection' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=a , layers_per_block=1 , upcast_attention=a , use_linear_projection=a , )
torch.manual_seed(0 )
__lowerCamelCase : List[str] = DDIMScheduler(
beta_schedule='scaled_linear' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='v_prediction' , set_alpha_to_one=a , steps_offset=1 , )
torch.manual_seed(0 )
__lowerCamelCase : List[str] = AutoencoderKL()
__lowerCamelCase : List[str] = {
# prior components
'prior_tokenizer': prior_tokenizer,
'prior_text_encoder': prior_text_encoder,
'prior': prior,
'prior_scheduler': prior_scheduler,
# image noising components
'image_normalizer': image_normalizer,
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder,
'unet': unet,
'scheduler': scheduler,
'vae': vae,
}
return components
def _snake_case ( self: Optional[int] , a: List[Any] , a: int=0 ) -> Optional[int]:
if str(a ).startswith('mps' ):
__lowerCamelCase : str = torch.manual_seed(a )
else:
__lowerCamelCase : List[str] = torch.Generator(device=a ).manual_seed(a )
__lowerCamelCase : List[Any] = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'prior_num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def _snake_case ( self: List[Any] ) -> List[str]:
__lowerCamelCase : Tuple = torch_device == 'cpu'
self._test_attention_slicing_forward_pass(test_max_difference=a )
def _snake_case ( self: str ) -> Optional[int]:
__lowerCamelCase : Tuple = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=a )
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Tuple ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self: str ) -> str:
__lowerCamelCase : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy' )
__lowerCamelCase : Union[str, Any] = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCamelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 )
__lowerCamelCase : Optional[Any] = pipe('anime turle' , generator=a , output_type='np' )
__lowerCamelCase : Optional[int] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a )
def _snake_case ( self: Union[str, Any] ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained('fusing/stable-unclip-2-1-l' , torch_dtype=torch.floataa )
__lowerCamelCase : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
__lowerCamelCase : str = pipe(
'anime turtle' , prior_num_inference_steps=2 , num_inference_steps=2 , output_type='np' , )
__lowerCamelCase : Any = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 360
|
import warnings
from .generation import TFGenerationMixin
class A_ ( __UpperCamelCase ):
'''simple docstring'''
warnings.warn(
"""Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will """
"""be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.""" , __UpperCamelCase , )
| 194
| 0
|
def __lowercase ( _A ) -> tuple[int, int]:
try:
SCREAMING_SNAKE_CASE : Dict = float(_A )
except ValueError:
raise ValueError("""Please enter a valid number""" )
SCREAMING_SNAKE_CASE : Union[str, Any] = decimal - int(_A )
if fractional_part == 0:
return int(_A ), 1
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = len(str(_A ).split(""".""" )[1] )
SCREAMING_SNAKE_CASE : List[str] = int(decimal * (10**number_of_frac_digits) )
SCREAMING_SNAKE_CASE : int = 10**number_of_frac_digits
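        # reduce the fraction by its greatest common divisor, found with the Euclidean algorithm below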
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = denominator, numerator
while True:
SCREAMING_SNAKE_CASE : List[str] = dividend % divisor
if remainder == 0:
break
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = divisor, remainder
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = numerator / divisor, denominator / divisor
return int(_A ), int(_A )
if __name__ == "__main__":
print(F"""{decimal_to_fraction(2) = }""")
print(F"""{decimal_to_fraction(89.0) = }""")
print(F"""{decimal_to_fraction('67') = }""")
print(F"""{decimal_to_fraction('45.0') = }""")
print(F"""{decimal_to_fraction(1.5) = }""")
print(F"""{decimal_to_fraction('6.25') = }""")
print(F"""{decimal_to_fraction('78td') = }""")
| 245
|
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
UpperCAmelCase__ : Tuple = logging.getLogger(__name__)
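# PyTorch Lightning module for token classification (e.g. NER or POS tagging) built on the shared BaseTransformer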
class a__ ( UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] ="""token-classification"""
def __init__( self : Tuple , UpperCAmelCase__ : Tuple ) ->Optional[Any]:
"""simple docstring"""
if type(UpperCAmelCase__ ) == dict:
SCREAMING_SNAKE_CASE : List[str] = Namespace(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = import_module("""tasks""" )
try:
SCREAMING_SNAKE_CASE : str = getattr(UpperCAmelCase__ , hparams.task_type )
SCREAMING_SNAKE_CASE : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
SCREAMING_SNAKE_CASE : List[Any] = self.token_classification_task.get_labels(hparams.labels )
SCREAMING_SNAKE_CASE : List[Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCAmelCase__ , len(self.labels ) , self.mode )
def _lowercase ( self : List[str] , **UpperCAmelCase__ : Optional[int] ) ->Any:
"""simple docstring"""
return self.model(**UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : List[str] = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
SCREAMING_SNAKE_CASE : Union[str, Any] = self(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : int = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _lowercase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.hparams
for mode in ["train", "dev", "test"]:
SCREAMING_SNAKE_CASE : Any = self._feature_file(UpperCAmelCase__ )
if os.path.exists(UpperCAmelCase__ ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Tuple = torch.load(UpperCAmelCase__ )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
SCREAMING_SNAKE_CASE : Optional[int] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[str] = self.token_classification_task.convert_examples_to_features(
UpperCAmelCase__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["""xlnet"""] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["""xlnet"""] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCAmelCase__ , pad_on_left=bool(self.config.model_type in ["""xlnet"""] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("""Saving features into cached file %s""" , UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , UpperCAmelCase__ )
def _lowercase ( self : List[str] , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : bool = False ) ->DataLoader:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self._feature_file(UpperCAmelCase__ )
logger.info("""Loading features from cached file %s""" , UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = torch.load(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
SCREAMING_SNAKE_CASE : int = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
SCREAMING_SNAKE_CASE : Any = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK(we will not use this anymore soon)
SCREAMING_SNAKE_CASE : Dict = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) , batch_size=UpperCAmelCase__ )
def _lowercase ( self : Tuple , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[Any] ) ->Tuple:
"""simple docstring"""
"""Compute validation""" ""
SCREAMING_SNAKE_CASE : Tuple = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type != "distilbert":
SCREAMING_SNAKE_CASE : str = (
batch[2] if self.config.model_type in ["""bert""", """xlnet"""] else None
            ) # XLM and RoBERTa don't use token_type_ids
SCREAMING_SNAKE_CASE : Dict = self(**UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = outputs[:2]
SCREAMING_SNAKE_CASE : Optional[Any] = logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE : Tuple = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _lowercase ( self : int , UpperCAmelCase__ : List[str] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = torch.stack([x["""val_loss"""] for x in outputs] ).mean()
SCREAMING_SNAKE_CASE : str = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : Tuple = np.argmax(UpperCAmelCase__ , axis=2 )
SCREAMING_SNAKE_CASE : Optional[Any] = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = dict(enumerate(self.labels ) )
SCREAMING_SNAKE_CASE : int = [[] for _ in range(out_label_ids.shape[0] )]
SCREAMING_SNAKE_CASE : Optional[int] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
SCREAMING_SNAKE_CASE : Tuple = {
"""val_loss""": val_loss_mean,
"""accuracy_score""": accuracy_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""precision""": precision_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""recall""": recall_score(UpperCAmelCase__ , UpperCAmelCase__ ),
"""f1""": fa_score(UpperCAmelCase__ , UpperCAmelCase__ ),
}
SCREAMING_SNAKE_CASE : Optional[int] = dict(results.items() )
SCREAMING_SNAKE_CASE : Optional[Any] = results
return ret, preds_list, out_label_list
def _lowercase ( self : Dict , UpperCAmelCase__ : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self._eval_end(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = ret["""log"""]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _lowercase ( self : List[str] , UpperCAmelCase__ : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self._eval_end(UpperCAmelCase__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
SCREAMING_SNAKE_CASE : Dict = ret["""log"""]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _lowercase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : int ) ->List[Any]:
"""simple docstring"""
BaseTransformer.add_model_specific_args(UpperCAmelCase__ , UpperCAmelCase__ )
parser.add_argument(
"""--task_type""" , default="""NER""" , type=UpperCAmelCase__ , help="""Task type to fine tune in training (e.g. NER, POS, etc)""" )
parser.add_argument(
"""--max_seq_length""" , default=1_2_8 , type=UpperCAmelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--labels""" , default="""""" , type=UpperCAmelCase__ , help="""Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.""" , )
parser.add_argument(
"""--gpus""" , default=0 , type=UpperCAmelCase__ , help="""The number of GPUs allocated for this, it is by default 0 meaning none""" , )
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""" )
return parser
if __name__ == "__main__":
UpperCAmelCase__ : str = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
UpperCAmelCase__ : Tuple = NERTransformer.add_model_specific_args(parser, os.getcwd())
UpperCAmelCase__ : int = parser.parse_args()
UpperCAmelCase__ : Union[str, Any] = NERTransformer(args)
UpperCAmelCase__ : str = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
UpperCAmelCase__ : Union[str, Any] = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
UpperCAmelCase__ : Any = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
| 245
| 1
|
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
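# projects CLIP embeddings into an extra time embedding and additional context tokens for the unCLIP decoder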
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self : Any , *,
__lowerCAmelCase : int = 4 , __lowerCAmelCase : int = 7_68 , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , ) -> Dict:
"""simple docstring"""
super().__init__()
A__ = nn.Parameter(torch.zeros(__lowerCAmelCase ) )
# parameters for additional clip time embeddings
A__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
A__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
# parameters for encoder hidden states
A__ = clip_extra_context_tokens
A__ = nn.Linear(
__lowerCAmelCase , self.clip_extra_context_tokens * cross_attention_dim )
A__ = nn.Linear(__lowerCAmelCase , __lowerCAmelCase )
A__ = nn.LayerNorm(__lowerCAmelCase )
def a_ ( self : Tuple , *, __lowerCAmelCase : str , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : List[str] , __lowerCAmelCase : Tuple ) -> Optional[Any]:
"""simple docstring"""
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
A__ = image_embeddings.shape[0]
A__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
A__ = classifier_free_guidance_embeddings.expand(
__lowerCAmelCase , -1 )
A__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
A__ = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
A__ = self.embedding_proj(__lowerCAmelCase )
A__ = self.clip_image_embeddings_project_to_time_embeddings(__lowerCAmelCase )
A__ = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
A__ = self.clip_extra_context_tokens_proj(__lowerCAmelCase )
A__ = clip_extra_context_tokens.reshape(__lowerCAmelCase , -1 , self.clip_extra_context_tokens )
A__ = clip_extra_context_tokens.permute(0 , 2 , 1 )
A__ = self.encoder_hidden_states_proj(__lowerCAmelCase )
A__ = self.text_encoder_hidden_states_norm(__lowerCAmelCase )
A__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 354
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
A : Optional[Any] = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : List[str] = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : Any = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
A : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276
| 0
|
"""simple docstring"""
def _snake_case ( ):
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(lowercase__ , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"{solution() = }")
| 96
|
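# Project Euler problem 25: index of the first Fibonacci term with a given number of digits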
def snake_case_ ( snake_case ) -> int:
if n == 1 or not isinstance(snake_case , snake_case ):
return 0
elif n == 2:
return 1
else:
lowercase__: Optional[Any] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def snake_case_ ( snake_case ) -> int:
lowercase__: int = 0
lowercase__: int = 2
while digits < n:
index += 1
lowercase__: Tuple = len(str(fibonacci(snake_case ) ) )
return index
def snake_case_ ( snake_case = 10_00 ) -> int:
return fibonacci_digits_index(snake_case )
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 196
| 0
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {
"""configuration_vivit""": ["""VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VivitConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ["""VivitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VivitModel""",
"""VivitPreTrainedModel""",
"""VivitForVideoClassification""",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 229
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json""",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
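# ConvBERT configuration: standard BERT-style hyper-parameters plus the span-based dynamic convolution settings (head_ratio, conv_kernel_size, num_groups)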
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
_lowercase : List[Any] = '''convbert'''
def __init__( self , _lowercase=30_522 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=2 , _lowercase=0.02 , _lowercase=1e-12 , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=768 , _lowercase=2 , _lowercase=9 , _lowercase=1 , _lowercase=None , **_lowercase , ):
"""simple docstring"""
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase , )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_act
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = embedding_size
_lowerCAmelCase = head_ratio
_lowerCAmelCase = conv_kernel_size
_lowerCAmelCase = num_groups
_lowerCAmelCase = classifier_dropout
class UpperCAmelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _lowercase ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
_lowerCAmelCase = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
_lowerCAmelCase = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
("""token_type_ids""", dynamic_axis),
] )
| 229
| 1
|
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__A = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__A = json.load(f)
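# slow integration tests: check that the WMT19 FSMT models reach the expected BLEU scores on a small validation set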
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : Optional[int]) ->str:
'''simple docstring'''
return FSMTTokenizer.from_pretrained(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : int) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Dict =FSMTForConditionalGeneration.from_pretrained(UpperCAmelCase_).to(UpperCAmelCase_)
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["en-ru", 26.0],
["ru-en", 22.0],
["en-de", 22.0],
["de-en", 29.0],
])
@slow
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Tuple =F"""facebook/wmt19-{pair}"""
lowerCamelCase__: str =self.get_tokenizer(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =self.get_model(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =bleu_data[pair]["src"]
lowerCamelCase__: Optional[int] =bleu_data[pair]["tgt"]
lowerCamelCase__: Union[str, Any] =tokenizer(UpperCAmelCase_ , return_tensors="pt" , truncation=UpperCAmelCase_ , padding="longest").to(UpperCAmelCase_)
lowerCamelCase__: List[Any] =model.generate(
input_ids=batch.input_ids , num_beams=8 , )
lowerCamelCase__: Union[str, Any] =tokenizer.batch_decode(
UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_)
lowerCamelCase__: str =calculate_bleu(UpperCAmelCase_ , UpperCAmelCase_)
print(UpperCAmelCase_)
self.assertGreaterEqual(scores["bleu"] , UpperCAmelCase_)
| 10
|
a__ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 235
| 0
|
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
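# helpers for converting a fairseq Speech2Text checkpoint into the Hugging Face Speech2Text format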
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : Optional[int] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ):
_UpperCAmelCase : Optional[Any] = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
_UpperCAmelCase : Optional[int] = s_dict.pop(UpperCamelCase__ )
elif "subsample" in key:
_UpperCAmelCase : Tuple = s_dict.pop(UpperCamelCase__ )
def lowerCamelCase_ (UpperCamelCase__ : str ):
_UpperCAmelCase : Any = emb.weight.shape
_UpperCAmelCase : Any = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
_UpperCAmelCase : Any = emb.weight.data
return lin_layer
def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Dict ):
_UpperCAmelCase : Tuple = torch.load(UpperCamelCase__ , map_location='''cpu''' )
_UpperCAmelCase : Optional[int] = mam_aaa['''args''']
_UpperCAmelCase : str = mam_aaa['''model''']
_UpperCAmelCase : Optional[Any] = state_dict['''decoder.output_projection.weight''']
remove_ignore_keys_(UpperCamelCase__ )
rename_keys(UpperCamelCase__ )
_UpperCAmelCase : str = state_dict['''decoder.embed_tokens.weight'''].shape[0]
_UpperCAmelCase : str = args.share_decoder_input_output_embed
_UpperCAmelCase : str = [int(UpperCamelCase__ ) for i in args.conv_kernel_sizes.split(''',''' )]
_UpperCAmelCase : Optional[int] = SpeechaTextConfig(
vocab_size=UpperCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(UpperCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase__ , num_beams=5 , max_length=200 , use_cache=UpperCamelCase__ , decoder_start_token_id=2 , early_stopping=UpperCamelCase__ , )
_UpperCAmelCase : Any = SpeechaTextForConditionalGeneration(UpperCamelCase__ )
_UpperCAmelCase : Tuple = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
'''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
F' but all the following weights are missing {missing}' )
if tie_embeds:
_UpperCAmelCase : Optional[int] = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
_UpperCAmelCase : List[Any] = lm_head_weights
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCAmelCase :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
_lowerCAmelCase :Optional[int] = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 358
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowerCAmelCase :Optional[Any] = {
'configuration_efficientnet': [
'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EfficientNetConfig',
'EfficientNetOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :str = ['EfficientNetImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase :Any = [
'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'EfficientNetForImageClassification',
'EfficientNetModel',
'EfficientNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
_lowerCAmelCase :Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 68
| 0
|
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
snake_case__ : str = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
snake_case__ : Optional[int] = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def _snake_case ( _snake_case : Any ):
lowerCAmelCase : Optional[int] = []
for i in range(len(__snake_case ) ):
lowerCAmelCase : Optional[Any] = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
lowerCAmelCase : int = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__snake_case ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__snake_case ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__snake_case ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
lowerCAmelCase : Optional[Any] = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__snake_case )
return next_generation
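# render a number of successive generations as grayscale PIL frames (black = alive, white = dead)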
def _snake_case ( _snake_case : Dict , _snake_case : Optional[Any] ):
lowerCAmelCase : Optional[int] = []
for _ in range(__snake_case ):
# Create output image
lowerCAmelCase : Any = Image.new('''RGB''' , (len(cells[0] ), len(__snake_case )) )
lowerCAmelCase : Optional[int] = img.load()
# Save cells to image
for x in range(len(__snake_case ) ):
for y in range(len(cells[0] ) ):
lowerCAmelCase : int = 255 - cells[y][x] * 255
lowerCAmelCase : int = (colour, colour, colour)
# Save image
images.append(__snake_case )
lowerCAmelCase : List[str] = new_generation(__snake_case )
return images
if __name__ == "__main__":
snake_case__ : Union[str, Any] = generate_images(GLIDER, 16)
images[0].save('''out.gif''', save_all=True, append_images=images[1:])
"""simple docstring"""
def validate_initial_digits(credit_card_number: str) -> bool:
    """simple docstring"""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
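# The accepted prefixes above correspond to the major issuers (34/37 American
# Express, 35 JCB, 4 Visa, 5 Mastercard, 6 Discover).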
def luhn_validation(credit_card_number: str) -> bool:
    """simple docstring"""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
# double the value of every second digit
        digit = int(cc_number[i])
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
total += int(cc_number[i] )
return total % 10 == 0
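# Worked example of the two-digit fold in the loop above: a doubled 8 gives 16,
# and 16 % 10 + 1 == 7 == 1 + 6, so the modulo/increment pair is equivalent to
# summing the digits of the doubled value (which is at most 18, so one
# adjustment always suffices).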
def validate_credit_card_number(credit_card_number: str) -> bool:
    """simple docstring"""
    error_message = f"{credit_card_number} is an invalid credit card number because"
if not credit_card_number.isdigit():
print(F'''{error_message} it has nonnumerical characters.''' )
return False
    if not 13 <= len(credit_card_number) <= 16:
print(F'''{error_message} of its length.''' )
return False
    if not validate_initial_digits(credit_card_number):
print(F'''{error_message} of its first two digits.''' )
return False
    if not luhn_validation(credit_card_number):
print(F'''{error_message} it fails the Luhn check.''' )
return False
print(F'''{credit_card_number} is a valid credit card number.''' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class a_ ( _a , _a , _a , unittest.TestCase ):
'''simple docstring'''
__a: Optional[int] = StableDiffusionInstructPixaPixPipeline
__a: Union[str, Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''}
__a: int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__a: List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
__a: int = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , )
lowerCAmelCase_ = PNDMScheduler(skip_prk_steps=snake_case_ )
torch.manual_seed(0 )
lowerCAmelCase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowerCAmelCase_ = CLIPTextModel(snake_case_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase_ = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase ( self , lowercase_ , lowercase_=0 ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ )
lowerCAmelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase_ = Image.fromarray(np.uinta(snake_case_ ) ).convert('RGB' )
if str(snake_case_ ).startswith('mps' ):
lowerCAmelCase_ = torch.manual_seed(snake_case_ )
else:
lowerCAmelCase_ = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
lowerCAmelCase_ = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""image_guidance_scale""": 1,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
lowerCAmelCase_ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase_ = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase_ = sd_pipe(**snake_case_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase_ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
lowerCAmelCase_ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase_ = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase_ = """french fries"""
lowerCAmelCase_ = sd_pipe(**snake_case_ , negative_prompt=snake_case_ )
lowerCAmelCase_ = output.images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase_ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
lowerCAmelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
lowerCAmelCase_ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase_ = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase_ = [inputs["""prompt"""]] * 2
lowerCAmelCase_ = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
lowerCAmelCase_ = torch.from_numpy(snake_case_ ).unsqueeze(0 ).to(snake_case_ )
lowerCAmelCase_ = image / 2 + 0.5
lowerCAmelCase_ = image.permute(0 , 3 , 1 , 2 )
lowerCAmelCase_ = image.repeat(2 , 1 , 1 , 1 )
lowerCAmelCase_ = sd_pipe(**snake_case_ ).images
lowerCAmelCase_ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
lowerCAmelCase_ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
lowerCAmelCase_ = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase_ = self.get_dummy_inputs(snake_case_ )
lowerCAmelCase_ = sd_pipe(**snake_case_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1]
lowerCAmelCase_ = [round(snake_case_ , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(snake_case_ ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase_ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self ) -> int:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self ) -> Any:
'''simple docstring'''
lowerCAmelCase_ = self.get_dummy_components()
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline(**snake_case_ )
lowerCAmelCase_ = VaeImageProcessor(do_resize=snake_case_ , do_normalize=snake_case_ )
lowerCAmelCase_ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
lowerCAmelCase_ = pipe(**self.get_dummy_inputs_by_type(snake_case_ , input_image_type='pt' ) )[0]
lowerCAmelCase_ = components["""vae"""]
lowerCAmelCase_ = self.get_dummy_inputs_by_type(snake_case_ , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
lowerCAmelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode()
lowerCAmelCase_ = pipe(**snake_case_ )[0]
lowerCAmelCase_ = np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case_ , 1e-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self , lowercase_=0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase_ = torch.manual_seed(snake_case_ )
lowerCAmelCase_ = load_image(
'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
lowerCAmelCase_ = {
"""prompt""": """turn him into a cyborg""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""image_guidance_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = self.get_inputs()
lowerCAmelCase_ = pipe(**snake_case_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase_ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self ) -> int:
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
lowerCAmelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = self.get_inputs()
lowerCAmelCase_ = pipe(**snake_case_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase_ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self ) -> Dict:
'''simple docstring'''
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ )
lowerCAmelCase_ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = self.get_inputs()
lowerCAmelCase_ = pipe(**snake_case_ ).images
lowerCAmelCase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
lowerCAmelCase_ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = 0
def callback_fn(lowercase_ , lowercase_ , lowercase_ ) -> None:
lowerCAmelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowerCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCAmelCase_ = latents[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowerCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
lowerCAmelCase_ = latents[0, -3:, -3:, -1]
lowerCAmelCase_ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowerCAmelCase_ = False
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ , torch_dtype=torch.floataa )
lowerCAmelCase_ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = self.get_inputs()
pipe(**snake_case_ , callback=snake_case_ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case_ , torch_dtype=torch.floataa )
lowerCAmelCase_ = pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase_ = self.get_inputs()
lowerCAmelCase_ = pipe(**snake_case_ )
lowerCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase_ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
lowerCAmelCase_ = inputs["""image"""].resize((5_0_4, 5_0_4) )
lowerCAmelCase_ = """timbrooks/instruct-pix2pix"""
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case_ , safety_checker=snake_case_ , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
lowerCAmelCase_ = pipe(**snake_case_ )
lowerCAmelCase_ = output.images[0]
lowerCAmelCase_ = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
lowerCAmelCase_ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCamelCase ( a_ , a_ ) -> Tuple:
lowerCAmelCase_ = XCLIPTextConfig()
# derive patch size from model name
lowerCAmelCase_ = model_name.find('patch' )
lowerCAmelCase_ = int(model_name[start_idx + len('patch' ) : start_idx + len('patch' ) + 2] )
lowerCAmelCase_ = XCLIPVisionConfig(patch_size=a_ , num_frames=a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
lowerCAmelCase_ = 12
lowerCAmelCase_ = 1_024
lowerCAmelCase_ = 4_096
lowerCAmelCase_ = 16
lowerCAmelCase_ = 24
lowerCAmelCase_ = 768
lowerCAmelCase_ = 3_072
if model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = 336
lowerCAmelCase_ = XCLIPConfig.from_text_vision_configs(a_ , a_ )
if "large" in model_name:
lowerCAmelCase_ = 768
return config
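# Sketch of the patch-size parsing above: for "xclip-base-patch32" the two
# characters after "patch" are "32", so patch_size becomes 32 (14 or 16 for the
# patch14 / patch16 checkpoints accordingly).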
def lowerCamelCase ( a_ ) -> List[str]:
# text encoder
if name == "token_embedding.weight":
lowerCAmelCase_ = name.replace('token_embedding.weight' , 'text_model.embeddings.token_embedding.weight' )
if name == "positional_embedding":
lowerCAmelCase_ = name.replace('positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "ln_1" in name:
lowerCAmelCase_ = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
lowerCAmelCase_ = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
lowerCAmelCase_ = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
lowerCAmelCase_ = name.replace('c_proj' , 'fc2' )
if name.startswith('transformer.resblocks' ):
lowerCAmelCase_ = name.replace('transformer.resblocks' , 'text_model.encoder.layers' )
if "attn.out_proj" in name and "message" not in name:
lowerCAmelCase_ = name.replace('attn.out_proj' , 'self_attn.out_proj' )
if "ln_final" in name:
lowerCAmelCase_ = name.replace('ln_final' , 'text_model.final_layer_norm' )
# visual encoder
if name == "visual.class_embedding":
lowerCAmelCase_ = name.replace('visual.class_embedding' , 'vision_model.embeddings.class_embedding' )
if name == "visual.positional_embedding":
lowerCAmelCase_ = name.replace('visual.positional_embedding' , 'vision_model.embeddings.position_embedding.weight' )
if name.startswith('visual.transformer.resblocks' ):
lowerCAmelCase_ = name.replace('visual.transformer.resblocks' , 'vision_model.encoder.layers' )
if "visual.conv1" in name:
lowerCAmelCase_ = name.replace('visual.conv1' , 'vision_model.embeddings.patch_embedding' )
if "visual.ln_pre" in name:
lowerCAmelCase_ = name.replace('visual.ln_pre' , 'vision_model.pre_layernorm' )
if "visual.ln_post" in name:
lowerCAmelCase_ = name.replace('visual.ln_post' , 'vision_model.post_layernorm' )
if "visual.proj" in name:
lowerCAmelCase_ = name.replace('visual.proj' , 'visual_projection.weight' )
if "text_projection" in name:
lowerCAmelCase_ = name.replace('text_projection' , 'text_projection.weight' )
# things on top
if "prompts_visual_proj" in name:
lowerCAmelCase_ = name.replace('prompts_visual_proj' , 'prompts_visual_projection' )
if "prompts_visual_ln" in name:
lowerCAmelCase_ = name.replace('prompts_visual_ln' , 'prompts_visual_layernorm' )
# mit
if name == "mit.positional_embedding":
lowerCAmelCase_ = name.replace('positional' , 'position' )
if name.startswith('mit.resblocks' ):
lowerCAmelCase_ = name.replace('mit.resblocks' , 'mit.encoder.layers' )
# prompts generator
if name.startswith('prompts_generator.norm' ):
lowerCAmelCase_ = name.replace('prompts_generator.norm' , 'prompts_generator.layernorm' )
return name
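# Illustrative mapping (assuming each branch above rebinds `name`):
#   "transformer.resblocks.0.attn.out_proj.weight"
#   -> "text_model.encoder.layers.0.self_attn.out_proj.weight"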
def lowerCamelCase ( a_ , a_ ) -> Dict:
for key in orig_state_dict.copy().keys():
lowerCAmelCase_ = orig_state_dict.pop(a_ )
if "attn.in_proj" in key:
lowerCAmelCase_ = key.split('.' )
if key.startswith('visual' ):
lowerCAmelCase_ = key_split[3]
lowerCAmelCase_ = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[
:dim
]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[
-dim:
]
else:
if "weight" in key:
lowerCAmelCase_ = val[
:dim, :
]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[
-dim:, :
]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
elif key.startswith('mit' ):
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.vision_config.mit_hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[dim : dim * 2, :]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[dim : dim * 2]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = key_split[2]
lowerCAmelCase_ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase_ = val[:dim, :]
lowerCAmelCase_ = val[
dim : dim * 2, :
]
lowerCAmelCase_ = val[-dim:, :]
else:
lowerCAmelCase_ = val[:dim]
lowerCAmelCase_ = val[
dim : dim * 2
]
lowerCAmelCase_ = val[-dim:]
else:
lowerCAmelCase_ = rename_key(a_ )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
lowerCAmelCase_ = val.T
lowerCAmelCase_ = val
return orig_state_dict
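# Note on the slicing above: the original checkpoints store query/key/value as a
# single fused `attn.in_proj` tensor of shape (3 * dim, dim) (or (3 * dim,) for
# the bias), so val[:dim], val[dim : dim * 2] and val[-dim:] recover the q, k and
# v projections that the HF model keeps as separate parameters.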
def lowerCamelCase ( a_ ) -> List[str]:
if num_frames == 8:
lowerCAmelCase_ = 'eating_spaghetti_8_frames.npy'
elif num_frames == 16:
lowerCAmelCase_ = 'eating_spaghetti.npy'
elif num_frames == 32:
lowerCAmelCase_ = 'eating_spaghetti_32_frames.npy'
lowerCAmelCase_ = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename=a_ , repo_type='dataset' , )
lowerCAmelCase_ = np.load(a_ )
return list(a_ )
def lowerCamelCase ( a_ , a_=None , a_=False ) -> List[Any]:
lowerCAmelCase_ = {
# fully supervised kinetics-400 checkpoints
'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth',
'xclip-base-patch32-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'
),
'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth',
'xclip-base-patch16-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'
),
'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb',
'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f',
# fully supervised kinetics-600 checkpoints
'xclip-base-patch16-kinetics-600': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'
),
'xclip-base-patch16-kinetics-600-16-frames': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'
),
'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be',
# few shot
'xclip-base-patch16-hmdb-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'
),
'xclip-base-patch16-hmdb-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'
),
'xclip-base-patch16-hmdb-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'
),
'xclip-base-patch16-hmdb-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'
),
'xclip-base-patch16-ucf-2-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'
),
'xclip-base-patch16-ucf-4-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'
),
'xclip-base-patch16-ucf-8-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'
),
'xclip-base-patch16-ucf-16-shot': (
'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'
),
# zero shot
'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth',
}
lowerCAmelCase_ = model_to_url[model_name]
lowerCAmelCase_ = 8
if "16-frames" in model_name:
lowerCAmelCase_ = 16
elif "shot" in model_name:
lowerCAmelCase_ = 32
lowerCAmelCase_ = get_xclip_config(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
model.eval()
if "drive" in checkpoint_url:
lowerCAmelCase_ = 'pytorch_model.bin'
gdown.cached_download(a_ , a_ , quiet=a_ )
lowerCAmelCase_ = torch.load(a_ , map_location='cpu' )['model']
else:
lowerCAmelCase_ = torch.hub.load_state_dict_from_url(a_ )['model']
lowerCAmelCase_ = convert_state_dict(a_ , a_ )
lowerCAmelCase_ = XCLIPModel(a_ )
lowerCAmelCase_ , lowerCAmelCase_ = model.load_state_dict(a_ , strict=a_ )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
lowerCAmelCase_ = 336 if model_name == 'xclip-large-patch14-16-frames' else 224
lowerCAmelCase_ = VideoMAEImageProcessor(size=a_ )
lowerCAmelCase_ = CLIPTokenizer.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = CLIPTokenizerFast.from_pretrained('openai/clip-vit-base-patch32' )
lowerCAmelCase_ = XCLIPProcessor(image_processor=a_ , tokenizer=a_ )
lowerCAmelCase_ = prepare_video(a_ )
lowerCAmelCase_ = processor(
text=['playing sports', 'eating spaghetti', 'go shopping'] , videos=a_ , return_tensors='pt' , padding=a_ )
print('Shape of pixel values:' , inputs.pixel_values.shape )
with torch.no_grad():
lowerCAmelCase_ = model(**a_ )
# Verify outputs
lowerCAmelCase_ = outputs.logits_per_video
lowerCAmelCase_ = logits_per_video.softmax(dim=1 )
print('Probs:' , a_ )
# kinetics-400
if model_name == "xclip-base-patch32":
lowerCAmelCase_ = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] )
elif model_name == "xclip-base-patch32-16-frames":
lowerCAmelCase_ = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
lowerCAmelCase_ = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] )
elif model_name == "xclip-base-patch16-16-frames":
lowerCAmelCase_ = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
lowerCAmelCase_ = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] )
elif model_name == "xclip-large-patch14-16-frames":
lowerCAmelCase_ = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
lowerCAmelCase_ = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
lowerCAmelCase_ = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
lowerCAmelCase_ = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
lowerCAmelCase_ = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
lowerCAmelCase_ = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
lowerCAmelCase_ = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
lowerCAmelCase_ = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
lowerCAmelCase_ = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
lowerCAmelCase_ = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
lowerCAmelCase_ = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F'''Model name {model_name} not supported''' )
assert torch.allclose(a_ , a_ , atol=1e-3 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a_ )
if push_to_hub:
print('Pushing model, processor and slow tokenizer files to the hub...' )
model.push_to_hub(a_ , organization='nielsr' )
processor.push_to_hub(a_ , organization='nielsr' )
slow_tokenizer.push_to_hub(a_ , organization='nielsr' )
if __name__ == "__main__":
lowerCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
lowerCamelCase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
a_ = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment out the command above and uncomment the one below.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
a_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
a_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
'''simple docstring'''
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
A__: Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : str ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[str] ) -> int:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
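# Example of the 0-1000 normalisation above: a box (10, 20, 30, 40) on a
# 100 x 200 (width x height) image becomes [100, 100, 300, 200].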
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : np.ndarray ,_UpperCAmelCase : Optional[str] ,_UpperCAmelCase : Optional[str] = None ) -> Optional[int]:
_a : Any =tesseract_config if tesseract_config is not None else """"""
# apply OCR
_a : Optional[Any] =to_pil_image(_UpperCAmelCase )
_a , _a : List[Any] =pil_image.size
_a : List[str] =pytesseract.image_to_data(_UpperCAmelCase ,lang=_UpperCAmelCase ,output_type="""dict""" ,config=_UpperCAmelCase )
_a , _a , _a , _a , _a : str =data["""text"""], data["""left"""], data["""top"""], data["""width"""], data["""height"""]
# filter empty words and corresponding coordinates
_a : Tuple =[idx for idx, word in enumerate(_UpperCAmelCase ) if not word.strip()]
_a : List[Any] =[word for idx, word in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Dict =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : List[str] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Union[str, Any] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
_a : Union[str, Any] =[coord for idx, coord in enumerate(_UpperCAmelCase ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
_a : List[str] =[]
for x, y, w, h in zip(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
_a : int =[x, y, x + w, y + h]
actual_boxes.append(_UpperCAmelCase )
# finally, normalize the bounding boxes
_a : str =[]
for box in actual_boxes:
normalized_boxes.append(normalize_box(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ) )
assert len(_UpperCAmelCase ) == len(_UpperCAmelCase ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class A__ ( UpperCAmelCase__ ):
__UpperCamelCase : List[Any] = ["pixel_values"]
def __init__( self :Tuple , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :bool = True , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = "" , **SCREAMING_SNAKE_CASE :Tuple , ) -> None:
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
_a : List[Any] =size if size is not None else {"""height""": 2_2_4, """width""": 2_2_4}
_a : Tuple =get_size_dict(SCREAMING_SNAKE_CASE )
_a : Dict =do_resize
_a : Tuple =size
_a : str =resample
_a : Dict =apply_ocr
_a : Union[str, Any] =ocr_lang
_a : Dict =tesseract_config
def __UpperCAmelCase ( self :List[str] , SCREAMING_SNAKE_CASE :np.ndarray , SCREAMING_SNAKE_CASE :Dict[str, int] , SCREAMING_SNAKE_CASE :PILImageResampling = PILImageResampling.BILINEAR , SCREAMING_SNAKE_CASE :Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE :Dict , ) -> np.ndarray:
'''simple docstring'''
_a : int =get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
_a : Any =(size["""height"""], size["""width"""])
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Dict , SCREAMING_SNAKE_CASE :ImageInput , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Dict[str, int] = None , SCREAMING_SNAKE_CASE :PILImageResampling = None , SCREAMING_SNAKE_CASE :bool = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[str] = None , SCREAMING_SNAKE_CASE :Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE :ChannelDimension = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE :Optional[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
_a : Optional[int] =do_resize if do_resize is not None else self.do_resize
_a : Optional[int] =size if size is not None else self.size
_a : str =get_size_dict(SCREAMING_SNAKE_CASE )
_a : List[str] =resample if resample is not None else self.resample
_a : int =apply_ocr if apply_ocr is not None else self.apply_ocr
_a : str =ocr_lang if ocr_lang is not None else self.ocr_lang
_a : Union[str, Any] =tesseract_config if tesseract_config is not None else self.tesseract_config
_a : List[str] =make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
# All transformations expect numpy arrays.
_a : List[Any] =[to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if apply_ocr:
requires_backends(self , """pytesseract""" )
_a : Any =[]
_a : Any =[]
for image in images:
_a , _a : int =apply_tesseract(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
words_batch.append(SCREAMING_SNAKE_CASE )
boxes_batch.append(SCREAMING_SNAKE_CASE )
if do_resize:
_a : Union[str, Any] =[self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_a : Dict =[flip_channel_order(SCREAMING_SNAKE_CASE ) for image in images]
_a : str =[to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
_a : str =BatchFeature(data={"""pixel_values""": images} , tensor_type=SCREAMING_SNAKE_CASE )
if apply_ocr:
_a : List[Any] =words_batch
_a : Dict =boxes_batch
return data
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
if partitions <= 0:
raise ValueError("""partitions must be a positive number!""" )
if partitions > number_of_bytes:
raise ValueError("""partitions can not > number_of_bytes!""" )
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
)
allocation_list.append(f'{start_bytes}-{end_bytes}' )
return allocation_list
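# A quick example of the helper above: allocation_num(16, 4) gives
# ['1-4', '5-8', '9-12', '13-16']; the last partition absorbs any remainder when
# number_of_bytes is not an exact multiple of partitions.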
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase = {"""configuration_van""": ["""VAN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """VanConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
"""VAN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""VanForImageClassification""",
"""VanModel""",
"""VanPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : int = logging.get_logger(__name__)
_A : List[str] = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class _lowercase ( UpperCAmelCase__ ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Dict = """speech_to_text"""
_SCREAMING_SNAKE_CASE : Optional[int] = ["""past_key_values"""]
_SCREAMING_SNAKE_CASE : str = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : List[str]=1_00_00 , SCREAMING_SNAKE_CASE__ : Optional[Any]=12 , SCREAMING_SNAKE_CASE__ : str=20_48 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE__ : List[Any]=6 , SCREAMING_SNAKE_CASE__ : List[Any]=20_48 , SCREAMING_SNAKE_CASE__ : Tuple=4 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : List[str]=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int="relu" , SCREAMING_SNAKE_CASE__ : str=2_56 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : Any=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0_2 , SCREAMING_SNAKE_CASE__ : Any=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=True , SCREAMING_SNAKE_CASE__ : Optional[Any]=1 , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , SCREAMING_SNAKE_CASE__ : int=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=60_00 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=10_24 , SCREAMING_SNAKE_CASE__ : List[Any]=2 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=(5, 5) , SCREAMING_SNAKE_CASE__ : Any=10_24 , SCREAMING_SNAKE_CASE__ : int=80 , SCREAMING_SNAKE_CASE__ : Union[str, Any]=1 , **SCREAMING_SNAKE_CASE__ : str , ) -> Optional[Any]:
__lowerCAmelCase = vocab_size
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = max_source_positions
__lowerCAmelCase = max_target_positions
__lowerCAmelCase = num_conv_layers
__lowerCAmelCase = list(SCREAMING_SNAKE_CASE__ )
__lowerCAmelCase = conv_channels
__lowerCAmelCase = input_feat_per_channel
__lowerCAmelCase = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase_ ( snake_case_ : Any ) -> Optional[Any]:
'''simple docstring'''
__lowerCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def UpperCamelCase_ ( snake_case_ : Optional[Any] ) -> List[Any]:
'''simple docstring'''
__lowerCAmelCase , __lowerCAmelCase = emb.weight.shape
__lowerCAmelCase = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
__lowerCAmelCase = emb.weight.data
return lin_layer
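# Sketch of the helper above: it wraps the shared embedding matrix (of shape
# (vocab_size, d_model)) in a bias-free nn.Linear whose weight is the very same
# tensor, so the decoder's output projection stays tied to the input embeddings.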
def UpperCamelCase_ ( snake_case_ : Any ) -> Any:
'''simple docstring'''
__lowerCAmelCase = torch.load(snake_case_ , map_location="""cpu""" )
__lowerCAmelCase = mam_aaa["""args"""] or mam_aaa["""cfg"""]["""model"""]
__lowerCAmelCase = mam_aaa["""model"""]
remove_ignore_keys_(snake_case_ )
__lowerCAmelCase = state_dict["""encoder.embed_tokens.weight"""].shape[0]
__lowerCAmelCase = MaMaaaConfig(
vocab_size=snake_case_ , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""relu""" , )
__lowerCAmelCase = state_dict["""decoder.embed_tokens.weight"""]
__lowerCAmelCase = MaMaaaForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ , strict=snake_case_ )
__lowerCAmelCase = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_A : str = parser.parse_args()
    _A : Optional[int] = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
def gray_code_sequence(bit_count: int) -> list:
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")
    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence
def gray_code_sequence_string(bit_count: int) -> list:
# The approach is a recursive one
# Base case achieved when either n = 0 or n=1
if bit_count == 0:
return ["0"]
if bit_count == 1:
return ["0", "1"]
    seq_len = 1 << bit_count  # defines the length of the sequence
# 1<< n is equivalent to 2^n
# recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []
# append 0 to first half of the smaller sequence generated
for i in range(seq_len // 2 ):
_UpperCAmelCase : List[Any] = "0" + smaller_sequence[i]
sequence.append(lowerCAmelCase )
# append 1 to second half ... start from the end of the list
for i in reversed(range(seq_len // 2 ) ):
_UpperCAmelCase : Union[str, Any] = "1" + smaller_sequence[i]
sequence.append(lowerCAmelCase )
return sequence
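# Example (using the corrected names above): gray_code_sequence_string(2) returns
# ["00", "01", "11", "10"], which the integer wrapper turns into [0, 1, 3, 2].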
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
SCREAMING_SNAKE_CASE_ = logging.getLogger(__name__)
@dataclass
class a :
_lowercase = field(
default="tab_fact" , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
_lowercase = field(
default="tab_fact" , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} , )
_lowercase = field(
default=1_0_2_4 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the training data."} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the validation data."} )
_lowercase = field(default=UpperCAmelCase , metadata={"help": "A csv or a json file containing the test data."} )
def _UpperCAmelCase ( self ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name." )
else:
_UpperCAmelCase : int = self.train_file.split("." )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
_UpperCAmelCase : Optional[int] = self.validation_file.split("." )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class a :
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_lowercase = field(
default=UpperCAmelCase , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_lowercase = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_lowercase = field(
default=UpperCAmelCase , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
_UpperCAmelCase : Tuple = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase )
datasets.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.set_verbosity(lowerCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
_UpperCAmelCase : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase : Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
_UpperCAmelCase : int = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
_UpperCAmelCase : Tuple = data_args.train_file.split("." )[-1]
_UpperCAmelCase : Optional[Any] = data_args.test_file.split("." )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
_UpperCAmelCase : Union[str, Any] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`." )
for key in data_files.keys():
logger.info(F'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith(".csv" ):
# Loading a dataset from local csv files
_UpperCAmelCase : List[str] = load_dataset("csv" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
_UpperCAmelCase : Dict = load_dataset("json" , data_files=lowerCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
_UpperCAmelCase : List[str] = raw_datasets["train"].features["label"].names
_UpperCAmelCase : Tuple = len(lowerCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
_UpperCAmelCase : Tuple = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowerCAmelCase , )
_UpperCAmelCase : Union[str, Any] = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowerCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
_UpperCAmelCase : Tuple = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
_UpperCAmelCase : List[Any] = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
_UpperCAmelCase : List[Any] = {"Refused": 0, "Entailed": 1}
_UpperCAmelCase : Optional[int] = {0: "Refused", 1: "Entailed"}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
_UpperCAmelCase : int = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowerCAmelCase: str ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowerCAmelCase: List[Any] ):
_UpperCAmelCase : Any = [_table_row.split("#" ) for _table_row in _table_text.strip("\n" ).split("\n" )]
_UpperCAmelCase : List[str] = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
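        # The raw "table_text" field is '#'-delimited with a header row, e.g.
        # "year#city\n2020#tokyo", which the helper above turns into a one-row
        # DataFrame with columns ["year", "city"] before the TAPEX tokenizer
        # flattens it again.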
_UpperCAmelCase : Tuple = examples["statement"]
_UpperCAmelCase : List[Any] = list(map(_convert_table_text_to_pandas , examples["table_text"] ) )
_UpperCAmelCase : Dict = tokenizer(lowerCAmelCase , lowerCAmelCase , padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase )
_UpperCAmelCase : List[str] = examples["label"]
return result
with training_args.main_process_first(desc="dataset map pre-processing" ):
_UpperCAmelCase : List[Any] = raw_datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on dataset" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
_UpperCAmelCase : Dict = raw_datasets["train"]
if data_args.max_train_samples is not None:
_UpperCAmelCase : List[Any] = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
_UpperCAmelCase : Union[str, Any] = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
_UpperCAmelCase : Any = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset" )
_UpperCAmelCase : Dict = raw_datasets["test"]
if data_args.max_predict_samples is not None:
_UpperCAmelCase : Any = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowerCAmelCase ) ) , 3 ):
logger.info(F'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowerCAmelCase: EvalPrediction ):
_UpperCAmelCase : Optional[int] = p.predictions[0] if isinstance(p.predictions , lowerCAmelCase ) else p.predictions
_UpperCAmelCase : Optional[Any] = np.argmax(lowerCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
_UpperCAmelCase : str = default_data_collator
elif training_args.fpaa:
_UpperCAmelCase : int = DataCollatorWithPadding(lowerCAmelCase , pad_to_multiple_of=8 )
else:
_UpperCAmelCase : List[str] = None
# Initialize our Trainer
_UpperCAmelCase : List[Any] = Trainer(
model=lowerCAmelCase , args=lowerCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowerCAmelCase , tokenizer=lowerCAmelCase , data_collator=lowerCAmelCase , )
# Training
if training_args.do_train:
_UpperCAmelCase : List[Any] = None
if training_args.resume_from_checkpoint is not None:
_UpperCAmelCase : List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_UpperCAmelCase : Dict = last_checkpoint
_UpperCAmelCase : str = trainer.train(resume_from_checkpoint=lowerCAmelCase )
_UpperCAmelCase : Tuple = train_result.metrics
_UpperCAmelCase : Optional[int] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase )
)
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowerCAmelCase )
trainer.save_metrics("train" , lowerCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : Optional[int] = trainer.evaluate(eval_dataset=lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase )
_UpperCAmelCase : Any = min(lowerCAmelCase , len(lowerCAmelCase ) )
trainer.log_metrics("eval" , lowerCAmelCase )
trainer.save_metrics("eval" , lowerCAmelCase )
if training_args.do_predict:
logger.info("*** Predict ***" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
_UpperCAmelCase : int = predict_dataset.remove_columns("label" )
_UpperCAmelCase : Any = trainer.predict(lowerCAmelCase , metric_key_prefix="predict" ).predictions
_UpperCAmelCase : List[str] = np.argmax(lowerCAmelCase , axis=1 )
_UpperCAmelCase : int = os.path.join(training_args.output_dir , "predict_results_tabfact.txt" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase , "w" ) as writer:
logger.info("***** Predict Results *****" )
writer.write("index\tprediction\n" )
for index, item in enumerate(lowerCAmelCase ):
_UpperCAmelCase : List[Any] = label_list[item]
writer.write(F'{index}\t{item}\n' )
_UpperCAmelCase : int = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase )
else:
trainer.create_model_card(**lowerCAmelCase )
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: Dict ) -> Union[str, Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 189
| 1
|
"""simple docstring"""
import argparse
import os
import re
_A = """src/diffusers"""
# Pattern that looks at the indentation in a line.
_A = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
_A = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_A = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
_A = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_A = re.compile(r"""\[([^\]]+)\]""")
def lowercase_ ( __UpperCAmelCase ) -> List[str]:
lowerCAmelCase__ : Union[str, Any] = _re_indent.search(SCREAMING_SNAKE_CASE_ )
return "" if search is None else search.groups()[0]
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase="" , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> int:
lowerCAmelCase__ : Tuple = 0
lowerCAmelCase__ : List[str] = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(SCREAMING_SNAKE_CASE_ ):
index += 1
lowerCAmelCase__ : int = ["""\n""".join(lines[:index] )]
else:
lowerCAmelCase__ : Union[str, Any] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
lowerCAmelCase__ : str = [lines[index]]
index += 1
while index < len(SCREAMING_SNAKE_CASE_ ) and (end_prompt is None or not lines[index].startswith(SCREAMING_SNAKE_CASE_ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(SCREAMING_SNAKE_CASE_ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
if index < len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCAmelCase__ : int = [lines[index + 1]]
index += 1
else:
lowerCAmelCase__ : Union[str, Any] = []
else:
blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : str = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(SCREAMING_SNAKE_CASE_ ) > 0:
blocks.append("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(SCREAMING_SNAKE_CASE_ ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def lowercase_ ( __UpperCAmelCase ) -> int:
def _inner(__UpperCAmelCase ):
return key(SCREAMING_SNAKE_CASE_ ).lower().replace("""_""" , """""" )
return _inner
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> int:
def noop(__UpperCAmelCase ):
return x
if key is None:
lowerCAmelCase__ : Tuple = noop
# Constants are all uppercase, they go first.
lowerCAmelCase__ : Optional[Any] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
lowerCAmelCase__ : Optional[int] = [obj for obj in objects if key(SCREAMING_SNAKE_CASE_ )[0].isupper() and not key(SCREAMING_SNAKE_CASE_ ).isupper()]
# Functions begin with a lowercase, they go last.
lowerCAmelCase__ : List[str] = [obj for obj in objects if not key(SCREAMING_SNAKE_CASE_ )[0].isupper()]
lowerCAmelCase__ : Tuple = ignore_underscore(SCREAMING_SNAKE_CASE_ )
return sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ ) + sorted(SCREAMING_SNAKE_CASE_ , key=SCREAMING_SNAKE_CASE_ )
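# Illustrative example (not part of the original script): for
#   objects = ["CONFIG_NAME", "load_config", "AutoConfig", "TOKEN_MAP", "build"]
# sort_objects(objects) returns
#   ["CONFIG_NAME", "TOKEN_MAP", "AutoConfig", "build", "load_config"]
# i.e. uppercase constants first, then classes, then functions, each group
# sorted alphabetically with underscores ignored.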
def lowercase_ ( __UpperCAmelCase ) -> str:
def _replace(__UpperCAmelCase ):
lowerCAmelCase__ : Optional[Any] = match.groups()[0]
if "," not in imports:
return f"""[{imports}]"""
lowerCAmelCase__ : Optional[Any] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase__ : List[Any] = keys[:-1]
return "[" + ", ".join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] ) + "]"
lowerCAmelCase__ : int = import_statement.split("""\n""" )
if len(SCREAMING_SNAKE_CASE_ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
lowerCAmelCase__ : str = 2 if lines[1].strip() == """[""" else 1
lowerCAmelCase__ : Optional[int] = [(i, _re_strip_line.search(SCREAMING_SNAKE_CASE_ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
lowerCAmelCase__ : str = sort_objects(SCREAMING_SNAKE_CASE_ , key=lambda __UpperCAmelCase : x[1] )
lowerCAmelCase__ : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(SCREAMING_SNAKE_CASE_ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
lowerCAmelCase__ : str = _re_bracket_content.sub(_replace , lines[1] )
else:
lowerCAmelCase__ : str = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
lowerCAmelCase__ : str = keys[:-1]
lowerCAmelCase__ : List[str] = get_indent(lines[1] ) + """, """.join([f"""\"{k}\"""" for k in sort_objects(SCREAMING_SNAKE_CASE_ )] )
return "\n".join(SCREAMING_SNAKE_CASE_ )
else:
# Finally we have to deal with imports fitting on one line
lowerCAmelCase__ : List[Any] = _re_bracket_content.sub(_replace , SCREAMING_SNAKE_CASE_ )
return import_statement
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase=True ) -> List[Any]:
with open(SCREAMING_SNAKE_CASE_ , """r""" ) as f:
lowerCAmelCase__ : int = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
lowerCAmelCase__ : str = split_code_in_indented_blocks(
SCREAMING_SNAKE_CASE_ , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(SCREAMING_SNAKE_CASE_ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
lowerCAmelCase__ : Optional[Any] = main_blocks[block_idx]
lowerCAmelCase__ : Dict = block.split("""\n""" )
# Get to the start of the imports.
lowerCAmelCase__ : List[Any] = 0
while line_idx < len(SCREAMING_SNAKE_CASE_ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
lowerCAmelCase__ : List[str] = len(SCREAMING_SNAKE_CASE_ )
else:
line_idx += 1
if line_idx >= len(SCREAMING_SNAKE_CASE_ ):
continue
# Ignore beginning and last line: they don't contain anything.
lowerCAmelCase__ : Tuple = """\n""".join(block_lines[line_idx:-1] )
lowerCAmelCase__ : str = get_indent(block_lines[1] )
# Slit the internal block into blocks of indent level 1.
lowerCAmelCase__ : int = split_code_in_indented_blocks(SCREAMING_SNAKE_CASE_ , indent_level=SCREAMING_SNAKE_CASE_ )
# We have two categories of import key: list or _import_structure[key].append/extend
lowerCAmelCase__ : Dict = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
lowerCAmelCase__ : Optional[Any] = [(pattern.search(SCREAMING_SNAKE_CASE_ ).groups()[0] if pattern.search(SCREAMING_SNAKE_CASE_ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
lowerCAmelCase__ : Optional[Any] = [(i, key) for i, key in enumerate(SCREAMING_SNAKE_CASE_ ) if key is not None]
lowerCAmelCase__ : int = [x[0] for x in sorted(SCREAMING_SNAKE_CASE_ , key=lambda __UpperCAmelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
lowerCAmelCase__ : Union[str, Any] = 0
lowerCAmelCase__ : List[str] = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
lowerCAmelCase__ : Dict = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(SCREAMING_SNAKE_CASE_ )
count += 1
# And we put our main block back together with its first and last line.
lowerCAmelCase__ : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(SCREAMING_SNAKE_CASE_ ):
if check_only:
return True
else:
print(f"""Overwriting {file}.""" )
with open(SCREAMING_SNAKE_CASE_ , """w""" ) as f:
f.write("""\n""".join(SCREAMING_SNAKE_CASE_ ) )
def lowercase_ ( __UpperCAmelCase=True ) -> Optional[Any]:
lowerCAmelCase__ : int = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE_ ):
if "__init__.py" in files:
lowerCAmelCase__ : Union[str, Any] = sort_imports(os.path.join(SCREAMING_SNAKE_CASE_ , """__init__.py""" ) , check_only=SCREAMING_SNAKE_CASE_ )
if result:
lowerCAmelCase__ : Dict = [os.path.join(SCREAMING_SNAKE_CASE_ , """__init__.py""" )]
if len(SCREAMING_SNAKE_CASE_ ) > 0:
raise ValueError(f"""Would overwrite {len(SCREAMING_SNAKE_CASE_ )} files, run `make style`.""" )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
_A = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 242
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[int] , SCREAMING_SNAKE_CASE_: int ) -> int:
'''simple docstring'''
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
A__ = nn.functional.normalize(SCREAMING_SNAKE_CASE_ )
return torch.mm(SCREAMING_SNAKE_CASE_ , normalized_text_embeds.t() )
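# Illustrative sketch (not part of the original module): both inputs are
# L2-normalized, so the matrix product above yields cosine similarities.
# With image embeddings of shape (batch, dim) and concept embeddings of shape
# (num_concepts, dim), e.g.
#   cosine_distance(torch.randn(2, 768), torch.randn(17, 768)).shape == (2, 17)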
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = CLIPConfig
__lowerCamelCase = ['CLIPEncoderLayer']
def __init__( self , lowercase ) -> Optional[int]:
'''simple docstring'''
super().__init__(lowercase )
A__ = CLIPVisionModel(config.vision_config )
A__ = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=lowercase )
A__ = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(17 ) , requires_grad=lowercase )
A__ = nn.Parameter(torch.ones(3 ) , requires_grad=lowercase )
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
A__ = cosine_distance(lowercase , self.special_care_embeds ).cpu().float().numpy()
A__ = cosine_distance(lowercase , self.concept_embeds ).cpu().float().numpy()
A__ = []
A__ = image_embeds.shape[0]
for i in range(lowercase ):
A__ = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
for concept_idx in range(len(special_cos_dist[0] ) ):
A__ = special_cos_dist[i][concept_idx]
A__ = self.special_care_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["special_scores"][concept_idx] > 0:
result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]} )
A__ = 0.01
for concept_idx in range(len(cos_dist[0] ) ):
A__ = cos_dist[i][concept_idx]
A__ = self.concept_embeds_weights[concept_idx].item()
A__ = round(concept_cos - concept_threshold + adjustment , 3 )
if result_img["concept_scores"][concept_idx] > 0:
result_img["bad_concepts"].append(lowercase )
result.append(lowercase )
A__ = [len(res["bad_concepts"] ) > 0 for res in result]
return images, has_nsfw_concepts
@torch.no_grad()
def UpperCamelCase ( self , lowercase , lowercase ) -> Any:
'''simple docstring'''
A__ = self.vision_model(lowercase )[1] # pooled_output
A__ = self.visual_projection(lowercase )
A__ = cosine_distance(lowercase , self.special_care_embeds )
A__ = cosine_distance(lowercase , self.concept_embeds )
# increase this value to create a stronger `nsfw` filter
# at the cost of increasing the possibility of filtering benign images
A__ = 0.0
A__ = special_cos_dist - self.special_care_embeds_weights + adjustment
# special_scores = special_scores.round(decimals=3)
A__ = torch.any(special_scores > 0 , dim=1 )
A__ = special_care * 0.01
A__ = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )
A__ = (cos_dist - self.concept_embeds_weights) + special_adjustment
# concept_scores = concept_scores.round(decimals=3)
A__ = torch.any(concept_scores > 0 , dim=1 )
return images, has_nsfw_concepts
| 68
| 0
|
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform max pooling over a square matrix with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling window
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """Perform average pooling over a square matrix with the given window size and stride."""
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('The input array is not a square matrix')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling window
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling window by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling window by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
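# Illustrative example (not part of the original module): with the functions
# above, a 4x4 input pooled with size=2 and stride=2 gives a 2x2 output:
#   >>> arr = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]])
#   >>> maxpooling(arr, size=2, stride=2)
#   array([[ 6.,  8.],
#          [14., 16.]])
#   >>> avgpooling(arr, size=2, stride=2)
#   array([[ 3.,  5.],
#          [11., 13.]])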
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name="avgpooling", verbose=True)
# Loading the image
image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 357
|
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
__lowerCamelCase = [-1] * len(UpperCamelCase__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class lowerCamelCase_ ( UpperCAmelCase_ ):
'''simple docstring'''
a__ : Union[str, Any] = """wav2vec2"""
def __init__( self , __lowercase=32 , __lowercase=768 , __lowercase=12 , __lowercase=12 , __lowercase=3_072 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.0 , __lowercase=0.0 , __lowercase=0.1 , __lowercase=0.1 , __lowercase=0.02 , __lowercase=1E-5 , __lowercase="group" , __lowercase="gelu" , __lowercase=(512, 512, 512, 512, 512, 512, 512) , __lowercase=(5, 2, 2, 2, 2, 2, 2) , __lowercase=(10, 3, 3, 3, 3, 2, 2) , __lowercase=False , __lowercase=128 , __lowercase=16 , __lowercase=False , __lowercase=True , __lowercase=0.05 , __lowercase=10 , __lowercase=2 , __lowercase=0.0 , __lowercase=10 , __lowercase=0 , __lowercase=320 , __lowercase=2 , __lowercase=0.1 , __lowercase=100 , __lowercase=256 , __lowercase=256 , __lowercase=0.1 , __lowercase="sum" , __lowercase=False , __lowercase=False , __lowercase=256 , __lowercase=(512, 512, 512, 512, 1_500) , __lowercase=(5, 3, 3, 1, 1) , __lowercase=(1, 2, 3, 1, 1) , __lowercase=512 , __lowercase=0 , __lowercase=1 , __lowercase=2 , __lowercase=False , __lowercase=3 , __lowercase=2 , __lowercase=3 , __lowercase=None , __lowercase=None , **__lowercase , ) -> int:
super().__init__(**__lowercase , pad_token_id=__lowercase , bos_token_id=__lowercase , eos_token_id=__lowercase)
__UpperCamelCase :Any = hidden_size
__UpperCamelCase :int = feat_extract_norm
__UpperCamelCase :Tuple = feat_extract_activation
__UpperCamelCase :Union[str, Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :int = list(__lowercase)
__UpperCamelCase :List[Any] = conv_bias
__UpperCamelCase :Optional[int] = num_conv_pos_embeddings
__UpperCamelCase :Dict = num_conv_pos_embedding_groups
__UpperCamelCase :Any = len(self.conv_dim)
__UpperCamelCase :List[str] = num_hidden_layers
__UpperCamelCase :int = intermediate_size
__UpperCamelCase :str = hidden_act
__UpperCamelCase :Any = num_attention_heads
__UpperCamelCase :int = hidden_dropout
__UpperCamelCase :Tuple = attention_dropout
__UpperCamelCase :List[str] = activation_dropout
__UpperCamelCase :Optional[Any] = feat_proj_dropout
__UpperCamelCase :Any = final_dropout
__UpperCamelCase :Any = layerdrop
__UpperCamelCase :str = layer_norm_eps
__UpperCamelCase :Optional[Any] = initializer_range
__UpperCamelCase :List[str] = vocab_size
__UpperCamelCase :str = do_stable_layer_norm
__UpperCamelCase :Union[str, Any] = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='''
''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='''
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__UpperCamelCase :List[Any] = apply_spec_augment
__UpperCamelCase :Tuple = mask_time_prob
__UpperCamelCase :int = mask_time_length
__UpperCamelCase :Dict = mask_time_min_masks
__UpperCamelCase :str = mask_feature_prob
__UpperCamelCase :List[str] = mask_feature_length
__UpperCamelCase :Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
__UpperCamelCase :Optional[Any] = num_codevectors_per_group
__UpperCamelCase :List[Any] = num_codevector_groups
__UpperCamelCase :Tuple = contrastive_logits_temperature
__UpperCamelCase :Optional[int] = feat_quantizer_dropout
__UpperCamelCase :Optional[int] = num_negatives
__UpperCamelCase :List[Any] = codevector_dim
__UpperCamelCase :str = proj_codevector_dim
__UpperCamelCase :List[str] = diversity_loss_weight
# ctc loss
__UpperCamelCase :Tuple = ctc_loss_reduction
__UpperCamelCase :Tuple = ctc_zero_infinity
# adapter
__UpperCamelCase :List[str] = add_adapter
__UpperCamelCase :Tuple = adapter_kernel_size
__UpperCamelCase :str = adapter_stride
__UpperCamelCase :Tuple = num_adapter_layers
__UpperCamelCase :Tuple = output_hidden_size or hidden_size
__UpperCamelCase :Optional[Any] = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__UpperCamelCase :Optional[Any] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__UpperCamelCase :Optional[int] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :List[Any] = list(__lowercase)
__UpperCamelCase :str = xvector_output_dim
@property
def UpperCamelCase__ ( self) -> List[str]:
return functools.reduce(operator.mul , self.conv_stride , 1)
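        # e.g. (illustrative, not in the original): with the default conv_stride of
        # (5, 2, 2, 2, 2, 2, 2) this product is 5 * 2 ** 6 = 320, i.e. one output
        # frame for every 320 input samples.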
| 43
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase : int = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : List[Any] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 14
| 0
|
import math
def prime_sieve(n: int) -> list:
    """Return every prime below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
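# Illustrative example (not part of the original solution): prime_sieve(10)
# returns [2, 3, 5, 7]; only odd indices are sieved, so 2 is added explicitly.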
def solution(limit: int = 999_966_663_333) -> int:
    """Sum the semidivisible numbers below the given limit."""
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 370
|
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class __magic_name__ ( unittest.TestCase ):
def __init__( self , _lowercase , _lowercase=13 , _lowercase=7 , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=True , _lowercase=99 , _lowercase=32 , _lowercase=5 , _lowercase=4 , _lowercase=37 , _lowercase="gelu" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=512 , _lowercase=16 , _lowercase=2 , _lowercase=0.02 , _lowercase=4 , )-> Union[str, Any]:
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_attention_mask
UpperCamelCase_ = use_token_type_ids
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = type_vocab_size
UpperCamelCase_ = type_sequence_label_size
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_choices
def UpperCAmelCase_ ( self )-> Optional[Any]:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ = None
if self.use_attention_mask:
UpperCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase_ = None
if self.use_token_type_ids:
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase_ = AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowercase , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase_ ( self )-> str:
UpperCamelCase_ = self.prepare_config_and_inputs()
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = config_and_inputs
UpperCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
@require_flax
class __magic_name__ ( snake_case , unittest.TestCase ):
UpperCamelCase_ :Optional[Any] = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase_ ( self )-> Optional[int]:
UpperCamelCase_ = FlaxAlbertModelTester(self )
@slow
def UpperCAmelCase_ ( self )-> str:
for model_class_name in self.all_model_classes:
UpperCamelCase_ = model_class_name.from_pretrained("albert-base-v2" )
UpperCamelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class __magic_name__ ( unittest.TestCase ):
@slow
def UpperCAmelCase_ ( self )-> List[str]:
UpperCamelCase_ = FlaxAlbertModel.from_pretrained("albert-base-v2" )
UpperCamelCase_ = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
UpperCamelCase_ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase_ = model(_lowercase , attention_mask=_lowercase )[0]
UpperCamelCase_ = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
UpperCamelCase_ = np.array(
[[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1e-4 ) )
| 60
| 0
|
'''simple docstring'''
from __future__ import annotations
solution = []
def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if a queen placed at (row, column) is not attacked."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True
def solve(board: list[list[int]], row: int) -> bool:
    """Place queens row by row, recording and printing every complete placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False
def printboard(board: list[list[int]]) -> None:
    """Print the board, marking queens with 'Q' and empty squares with '.'."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
| 229
|
'''simple docstring'''
def solution(limit: int = 28_123) -> int:
    """Sum the positive integers up to the limit that are not a sum of two abundant numbers."""
    sum_divs = [1] * (limit + 1)
    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)
        if not any((n - a in abundants) for a in abundants):
            res += n
    return res
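# Illustrative note (not in the original): 12 is the smallest abundant number
# (1 + 2 + 3 + 4 + 6 = 16 > 12), and every integer greater than 28123 can be
# written as the sum of two abundant numbers, which is why the search above
# stops at that limit.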
if __name__ == "__main__":
print(solution())
| 229
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class _UpperCAmelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
snake_case = '''nat'''
snake_case = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : Optional[Any] , __UpperCAmelCase : List[Any]=4 , __UpperCAmelCase : int=3 , __UpperCAmelCase : Any=64 , __UpperCAmelCase : Dict=[3, 4, 6, 5] , __UpperCAmelCase : Dict=[2, 4, 8, 16] , __UpperCAmelCase : Tuple=7 , __UpperCAmelCase : List[Any]=3.0 , __UpperCAmelCase : Any=True , __UpperCAmelCase : Optional[Any]=0.0 , __UpperCAmelCase : Tuple=0.0 , __UpperCAmelCase : Optional[Any]=0.1 , __UpperCAmelCase : Union[str, Any]="gelu" , __UpperCAmelCase : int=0.02 , __UpperCAmelCase : List[Any]=1E-5 , __UpperCAmelCase : Any=0.0 , __UpperCAmelCase : Union[str, Any]=None , __UpperCAmelCase : Dict=None , **__UpperCAmelCase : Any , ):
'''simple docstring'''
super().__init__(**__UpperCAmelCase )
_A = patch_size
_A = num_channels
_A = embed_dim
_A = depths
_A = len(__UpperCAmelCase )
_A = num_heads
_A = kernel_size
_A = mlp_ratio
_A = qkv_bias
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = drop_path_rate
_A = hidden_act
_A = layer_norm_eps
_A = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_A = int(embed_dim * 2 ** (len(__UpperCAmelCase ) - 1) )
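        # e.g. (illustrative): with the default embed_dim=64 and depths=[3, 4, 6, 5]
        # (four stages), hidden_size = 64 * 2 ** 3 = 512 channels after the last stage.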
_A = layer_scale_init_value
_A = ["stem"] + [f'''stage{idx}''' for idx in range(1 , len(__UpperCAmelCase ) + 1 )]
_A , _A = get_aligned_output_features_output_indices(
out_features=__UpperCAmelCase , out_indices=__UpperCAmelCase , stage_names=self.stage_names )
| 174
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCamelCase_ = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCamelCase_ = {
'''allenai/led-base-16384''': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __lowercase ( ) -> Dict:
'''simple docstring'''
_A = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
_A = bs[:]
_A = 0
for b in range(2**8 ):
if b not in bs:
bs.append(__lowercase )
cs.append(2**8 + n )
n += 1
_A = [chr(__lowercase ) for n in cs]
return dict(zip(__lowercase , __lowercase ) )
def __lowercase ( __lowercase ) -> Dict:
'''simple docstring'''
_A = set()
_A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_A = char
return pairs
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict , __UpperCAmelCase : Any , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple="replace" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : List[Any]="</s>" , __UpperCAmelCase : Any="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : Optional[int]="<pad>" , __UpperCAmelCase : Optional[Any]="<mask>" , __UpperCAmelCase : Any=False , **__UpperCAmelCase : Any , ):
'''simple docstring'''
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_A = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , **__UpperCAmelCase , )
with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
_A = json.load(__UpperCAmelCase )
_A = {v: k for k, v in self.encoder.items()}
_A = errors # how to handle errors in decoding
_A = bytes_to_unicode()
_A = {v: k for k, v in self.byte_encoder.items()}
with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
_A = merges_handle.read().split("\n" )[1:-1]
_A = [tuple(merge.split() ) for merge in bpe_merges]
_A = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
_A = {}
_A = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_A = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_A = tuple(__UpperCAmelCase )
_A = get_pairs(__UpperCAmelCase )
if not pairs:
return token
while True:
_A = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
_A , _A = bigram
_A = []
_A = 0
while i < len(__UpperCAmelCase ):
try:
_A = word.index(__UpperCAmelCase , __UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_A = j
if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_A = tuple(__UpperCAmelCase )
_A = new_word
if len(__UpperCAmelCase ) == 1:
break
else:
_A = get_pairs(__UpperCAmelCase )
_A = " ".join(__UpperCAmelCase )
_A = word
return word
def lowerCAmelCase ( self : Any , __UpperCAmelCase : str ):
'''simple docstring'''
_A = []
for token in re.findall(self.pat , __UpperCAmelCase ):
_A = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCAmelCase ).split(" " ) )
return bpe_tokens
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple ):
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Optional[int] ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase )
def lowerCAmelCase ( self : str , __UpperCAmelCase : Dict ):
'''simple docstring'''
_A = "".join(__UpperCAmelCase )
_A = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
_A = 0
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
_A = token_index
writer.write(" ".join(__UpperCAmelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def lowerCAmelCase ( self : List[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_A = [self.cls_token_id]
_A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1, 1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ):
'''simple docstring'''
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase ( self : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict=False , **__UpperCAmelCase : Any ):
'''simple docstring'''
_A = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__UpperCAmelCase ) > 0 and not text[0].isspace()):
_A = " " + text
return (text, kwargs)
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[bool] = None , ):
'''simple docstring'''
_A = super()._pad(
encoded_inputs=__UpperCAmelCase , max_length=__UpperCAmelCase , padding_strategy=__UpperCAmelCase , pad_to_multiple_of=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
_A = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_A = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_A = len(encoded_inputs["global_attention_mask"] ) != len(__UpperCAmelCase )
if needs_to_be_padded:
_A = len(__UpperCAmelCase ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_A = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
_A = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 174
| 1
|
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""")
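# Illustrative note (not in the original): there are 4**9 * 6**6 = 12_230_590_464
# equally likely games, and solution() rounds Peter's win probability to
# 7 decimal places (approximately 0.5731441).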
| 189
|
def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
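# Worked example (illustrative, not in the original): euclidean_gcd(48, 18)
# iterates (48, 18) -> (18, 12) -> (12, 6) -> (6, 0) and returns 6;
# euclidean_gcd_recursive(48, 18) follows the same chain of calls.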
| 189
| 1
|
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class PaintByExampleImageEncoder(CLIPPreTrainedModel):
    def __init__(self, config, proj_size=768):
        """simple docstring"""
        super().__init__(config)
        self.proj_size = proj_size

        self.model = CLIPVisionModel(config)
        self.mapper = PaintByExampleMapper(config)
        self.final_layer_norm = nn.LayerNorm(config.hidden_size)
        self.proj_out = nn.Linear(config.hidden_size, self.proj_size)

        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size)))

    def forward(self, pixel_values, return_uncond_vector=False):
        """simple docstring"""
        clip_output = self.model(pixel_values=pixel_values)
        latent_states = clip_output.pooler_output
        latent_states = self.mapper(latent_states[:, None])
        latent_states = self.final_layer_norm(latent_states)
        latent_states = self.proj_out(latent_states)
        if return_uncond_vector:
            return latent_states, self.uncond_vector

        return latent_states


class PaintByExampleMapper(nn.Module):
    def __init__(self, config):
        """simple docstring"""
        super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                # attention head dim and attention_bias follow the usual BasicTransformerBlock usage
                BasicTransformerBlock(hid_size, num_heads, hid_size // num_heads, activation_fn="gelu", attention_bias=True)
                for _ in range(num_layers)
            ]
        )

    def forward(self, hidden_states):
        """simple docstring"""
        for block in self.blocks:
            hidden_states = block(hidden_states)

        return hidden_states
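        # Shape sketch (illustrative): CLIP's pooler_output is [batch, hidden_size]; the [:, None]
        # indexing above makes it [batch, 1, hidden_size], so the transformer blocks and proj_out
        # produce conditioning embeddings of shape [batch, 1, proj_size].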
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
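# Pattern note (illustrative): each guarded block above degrades gracefully. If an optional
# backend is missing, dummy objects that raise a helpful import error on use are exported
# instead of the real classes, so importing the package itself never fails.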
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
'''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        """simple docstring"""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        """simple docstring"""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1_000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    '''simple docstring'''

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """simple docstring"""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        """simple docstring"""
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # scheduler dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'''num_warmup_steps''': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, '''num_cycles''': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, '''power''': 2.0, '''lr_end''': 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{'''num_warmup_steps''': 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    '''simple docstring'''

    def __init__(self, fn):
        """simple docstring"""
        self.fn = fn

    def __call__(self, *args, **kwargs):
        """simple docstring"""
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        """simple docstring"""
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
        "OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "OPTForCausalLM",
        "OPTModel",
        "OPTPreTrainedModel",
        "OPTForSequenceClassification",
        "OPTForQuestionAnswering",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
        "FlaxOPTForCausalLM",
        "FlaxOPTModel",
        "FlaxOPTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
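    # Note (illustrative): with _LazyModule, the submodules listed in _import_structure are only
    # imported on first attribute access, which keeps importing the top-level package fast.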
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """simple docstring"""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"
SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class MgpstrProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained("gpt2")
        self.wp_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")

        super().__init__(image_processor, tokenizer)
def __call__(self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You need to specify either an `images` or `text` input to process." )
if images is not None:
a = self.image_processor(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is not None:
a = self.char_tokenizer(lowerCamelCase_ , return_tensors=lowerCamelCase_ , **lowerCamelCase_ )
if text is None:
return inputs
elif images is None:
return encodings
else:
a = encodings["input_ids"]
return inputs
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a , a , a = sequences
a = char_preds.size(0 )
a , a = self._decode_helper(lowerCamelCase_ , "char" )
a , a = self._decode_helper(lowerCamelCase_ , "bpe" )
a , a = self._decode_helper(lowerCamelCase_ , "wp" )
a = []
a = []
for i in range(lowerCamelCase_ ):
a = [char_scores[i], bpe_scores[i], wp_scores[i]]
a = [char_strs[i], bpe_strs[i], wp_strs[i]]
a = scores.index(max(lowerCamelCase_ ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
a = {}
a = final_strs
a = final_scores
a = char_strs
a = bpe_strs
a = wp_strs
return out
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if format == DecodeType.CHARACTER:
a = self.char_decode
a = 1
a = "[s]"
elif format == DecodeType.BPE:
a = self.bpe_decode
a = 2
a = "#"
elif format == DecodeType.WORDPIECE:
a = self.wp_decode
a = 102
a = "[SEP]"
else:
raise ValueError(F'''Format {format} is not supported.''' )
a , a = [], []
a = pred_logits.size(0 )
a = pred_logits.size(1 )
a , a = pred_logits.topk(1 , dim=-1 , largest=lowerCamelCase_ , sorted=lowerCamelCase_ )
a = preds_index.view(-1 , lowerCamelCase_ )[:, 1:]
a = decoder(lowerCamelCase_ )
a , a = torch.nn.functional.softmax(lowerCamelCase_ , dim=2 ).max(dim=2 )
a = preds_max_prob[:, 1:]
for index in range(lowerCamelCase_ ):
a = preds_str[index].find(lowerCamelCase_ )
a = preds_str[index][:pred_eos]
a = preds_index[index].cpu().tolist()
a = pred_index.index(lowerCamelCase_ ) if eos_token in pred_index else -1
a = preds_max_prob[index][: pred_eos_index + 1]
a = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(lowerCamelCase_ )
conf_scores.append(lowerCamelCase_ )
return dec_strs, conf_scores
    def char_decode(self, sequences):
        """simple docstring"""
        decode_strs = [seq.replace(" ", "") for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """simple docstring"""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """simple docstring"""
        decode_strs = [seq.replace(" ", "") for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
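    # Decoding note (illustrative): batch_decode above scores the character, BPE and wordpiece
    # heads independently and keeps, for each sample, the string from whichever head is most
    # confident, while still returning the per-head strings for inspection.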
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """simple docstring"""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
def __init__(self , lowerCamelCase_ = 1000 , lowerCamelCase_ = 0.0_0085 , lowerCamelCase_ = 0.012 , lowerCamelCase_ = "linear" , lowerCamelCase_ = None , lowerCamelCase_ = "epsilon" , lowerCamelCase_ = "linspace" , lowerCamelCase_ = 0 , ):
"""simple docstring"""
if trained_betas is not None:
a = torch.tensor(lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "linear":
a = torch.linspace(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCamelCase_ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a = betas_for_alpha_bar(lowerCamelCase_ )
else:
raise NotImplementedError(F'''{beta_schedule} does is not implemented for {self.__class__}''' )
a = 1.0 - self.betas
a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
if schedule_timesteps is None:
a = self.timesteps
a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
a = 1 if len(lowerCamelCase_ ) > 1 else 0
else:
a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
if self.state_in_first_order:
a = self.sigmas[step_index]
else:
a = self.sigmas_interpol[step_index]
a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , ):
"""simple docstring"""
a = num_inference_steps
a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a = np.linspace(0 , num_train_timesteps - 1 , lowerCamelCase_ , dtype=lowerCamelCase_ )[::-1].copy()
elif self.config.timestep_spacing == "leading":
a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(0 , lowerCamelCase_ ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase_ )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a = (np.arange(lowerCamelCase_ , 0 , -step_ratio )).round().copy().astype(lowerCamelCase_ )
timesteps -= 1
else:
raise ValueError(
F'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
a = torch.from_numpy(np.log(lowerCamelCase_ ) ).to(lowerCamelCase_ )
a = np.interp(lowerCamelCase_ , np.arange(0 , len(lowerCamelCase_ ) ) , lowerCamelCase_ )
a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
a = torch.from_numpy(lowerCamelCase_ ).to(device=lowerCamelCase_ )
# interpolate sigmas
a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(lowerCamelCase_ ).startswith("mps" ):
# mps does not support float64
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=torch.floataa )
else:
a = torch.from_numpy(lowerCamelCase_ ).to(lowerCamelCase_ )
# interpolate timesteps
a = self.sigma_to_t(lowerCamelCase_ ).to(lowerCamelCase_ , dtype=timesteps.dtype )
a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
a = torch.cat([timesteps[:1], interleaved_timesteps] )
a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a = defaultdict(lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = sigma.log()
# get distribution
a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
a = low_idx + 1
a = self.log_sigmas[low_idx]
a = self.log_sigmas[high_idx]
# interpolate sigmas
a = (low - log_sigma) / (low - high)
a = w.clamp(0 , 1 )
# transform interpolation to time range
a = (1 - w) * low_idx + w * high_idx
a = t.view(sigma.shape )
return t
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return self.sample is None
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = True , ):
"""simple docstring"""
a = self.index_for_timestep(lowerCamelCase_ )
# advance index counter by 1
a = timestep.cpu().item() if torch.is_tensor(lowerCamelCase_ ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a = self.sigmas[step_index]
a = self.sigmas_interpol[step_index + 1]
a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
a = self.sigmas[step_index - 1]
a = self.sigmas_interpol[step_index]
a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a = 0
a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a = sigma_hat if self.state_in_first_order else sigma_interpol
a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError("prediction_type not implemented yet: sample" )
else:
raise ValueError(
F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a = sigma_interpol - sigma_hat
# store for 2nd order step
a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
a = sigma_next - sigma_hat
a = self.sample
a = None
a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ):
"""simple docstring"""
a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase_ ):
# mps does not support float64
a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
a = self.timesteps.to(original_samples.device )
a = timesteps.to(original_samples.device )
a = [self.index_for_timestep(lowerCamelCase_ , lowerCamelCase_ ) for t in timesteps]
a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
a = sigma.unsqueeze(-1 )
a = original_samples + noise * sigma
return noisy_samples
def __len__(self ):
"""simple docstring"""
return self.config.num_train_timesteps
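    # Stepping note (illustrative): `step` alternates between a first-order half step from
    # sigma_hat toward sigma_interpol (storing the current sample) and a second-order correction
    # that reuses the derivative at sigma_interpol to move from sigma_hat to sigma_next, as in KDPM2.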
'''simple docstring'''
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """simple docstring"""
    if depth < 0:
        raise ValueError('Depth cannot be less than 0')
    if not scores:
        raise ValueError('Scores cannot be empty')
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    """simple docstring"""
    scores = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
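# Illustrative check (not part of the original file): for the leaf scores [3, 5, 2, 9]
# (height = 2) the maximiser can guarantee max(min(3, 5), min(2, 9)) = 3, so
# minimax(0, 0, True, [3, 5, 2, 9], 2) == 3.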
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self : List[Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any]=1_3 , UpperCamelCase_ : Tuple=7 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : int=True , UpperCamelCase_ : Union[str, Any]=True , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : List[str]=9_9 , UpperCamelCase_ : str=3_2 , UpperCamelCase_ : Union[str, Any]=5 , UpperCamelCase_ : int=4 , UpperCamelCase_ : Optional[Any]=3_7 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : str=5_1_2 , UpperCamelCase_ : Optional[Any]=1_6 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Any=0.02 , UpperCamelCase_ : Union[str, Any]=4 , ):
lowerCAmelCase : str = parent
lowerCAmelCase : List[str] = batch_size
lowerCAmelCase : int = seq_length
lowerCAmelCase : str = is_training
lowerCAmelCase : Tuple = use_attention_mask
lowerCAmelCase : Dict = use_token_type_ids
lowerCAmelCase : Optional[int] = use_labels
lowerCAmelCase : Optional[Any] = vocab_size
lowerCAmelCase : Optional[int] = hidden_size
lowerCAmelCase : Optional[Any] = num_hidden_layers
lowerCAmelCase : str = num_attention_heads
lowerCAmelCase : Optional[Any] = intermediate_size
lowerCAmelCase : int = hidden_act
lowerCAmelCase : int = hidden_dropout_prob
lowerCAmelCase : Tuple = attention_probs_dropout_prob
lowerCAmelCase : str = max_position_embeddings
lowerCAmelCase : str = type_vocab_size
lowerCAmelCase : str = type_sequence_label_size
lowerCAmelCase : Any = initializer_range
lowerCAmelCase : int = num_choices
def lowerCamelCase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[int] = None
if self.use_attention_mask:
lowerCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase : Union[str, Any] = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase__ ( self : int ):
lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Optional[Any] = config_and_inputs
lowerCAmelCase : Optional[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : int = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase : Tuple = config_and_inputs
lowerCAmelCase : str = True
lowerCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
@slow
def lowerCamelCase__ ( self : List[str] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : int = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class snake_case_( unittest.TestCase ):
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : str = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : Any = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
lowerCAmelCase : Union[str, Any] = model(UpperCamelCase_ )[0]
lowerCAmelCase : str = [1, 1_1, 5_0_2_6_5]
self.assertEqual(list(output.shape ) , UpperCamelCase_ )
# compare the actual values for a slice.
lowerCAmelCase : Optional[Any] = np.array(
[[[40.4_880, 18.0_199, -5.2_367], [-1.8_877, -4.0_885, 10.7_085], [-2.2_613, -5.6_110, 7.2_665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase__ ( self : List[str] ):
lowerCAmelCase : Dict = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCamelCase_ )
lowerCAmelCase : str = np.array([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] , dtype=jnp.intaa )
lowerCAmelCase : str = model(UpperCamelCase_ )[0]
# compare the actual values for a slice.
lowerCAmelCase : str = np.array(
[[[0.0_208, -0.0_356, 0.0_237], [-0.1_569, -0.0_411, -0.2_626], [0.1_879, 0.0_125, -0.0_089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
from pathlib import Path
import fire
from tqdm import tqdm
def download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir=None) -> None:
    """Download a translation dataset with `datasets` and save it as train/val/test .source/.target files."""
    try:
        import datasets
    except (ModuleNotFoundError, ImportError):
        raise ImportError("run pip install datasets")
    pair = f"{src_lang}-{tgt_lang}"
    print(f"Converting {dataset}-{pair}")
    ds = datasets.load_dataset(dataset, pair)
    if save_dir is None:
        save_dir = f"{dataset}-{pair}"
    save_dir = Path(save_dir)
    save_dir.mkdir(exist_ok=True)

    for split in ds.keys():
        print(f"Splitting {split} with {ds[split].num_rows} records")

        # to save to val.source, val.target like summary datasets
        fn = "val" if split == "validation" else split

        src_path = save_dir.joinpath(f"{fn}.source")
        tgt_path = save_dir.joinpath(f"{fn}.target")
        src_fp = src_path.open("w+")
        tgt_fp = tgt_path.open("w+")

        # reader is the bottleneck so writing one record at a time doesn't slow things down
        for x in tqdm(ds[split]):
            ex = x["translation"]
            src_fp.write(ex[src_lang] + "\n")
            tgt_fp.write(ex[tgt_lang] + "\n")

    print(f"Saved {dataset} dataset to {save_dir}")
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
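# Example invocation (assumed script name, shown for illustration):
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
# fire exposes each keyword argument of download_wmt_dataset as a CLI flag.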
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
_lowerCamelCase : str = logging.get_logger(__name__)
def __lowerCamelCase (UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[str] ):
return [
int(1_0_0_0 * (box[0] / width) ),
int(1_0_0_0 * (box[1] / height) ),
int(1_0_0_0 * (box[2] / width) ),
int(1_0_0_0 * (box[3] / height) ),
]
def __lowerCamelCase (UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] ):
SCREAMING_SNAKE_CASE = to_pil_image(UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = pil_image.size
SCREAMING_SNAKE_CASE = pytesseract.image_to_data(UpperCAmelCase__ , lang=UpperCAmelCase__ , output_type="dict" , config=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(UpperCAmelCase__ ) if not word.strip()]
SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(UpperCAmelCase__ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(UpperCAmelCase__ )
# finally, normalize the bounding boxes
SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) )
assert len(UpperCAmelCase__ ) == len(UpperCAmelCase__ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : int , _UpperCamelCase : bool = True , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : bool = True , _UpperCamelCase : float = 1 / 255 , _UpperCamelCase : bool = True , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : bool = True , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = "" , **_UpperCamelCase : Union[str, Any] , ) -> None:
'''simple docstring'''
super().__init__(**_UpperCamelCase )
SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = do_resize
SCREAMING_SNAKE_CASE = size
SCREAMING_SNAKE_CASE = resample
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_value
SCREAMING_SNAKE_CASE = do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
SCREAMING_SNAKE_CASE = apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config
def __snake_case( self : Dict , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, int] , _UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : List[Any] , ) -> np.ndarray:
'''simple docstring'''
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}" )
SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Union[str, Any] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, float] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return rescale(_UpperCamelCase , scale=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : int , _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[float, Iterable[float]] , _UpperCamelCase : Union[float, Iterable[float]] , _UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **_UpperCamelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return normalize(_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase , data_format=_UpperCamelCase , **_UpperCamelCase )
def __snake_case( self : Tuple , _UpperCamelCase : ImageInput , _UpperCamelCase : bool = None , _UpperCamelCase : Dict[str, int] = None , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : bool = None , _UpperCamelCase : float = None , _UpperCamelCase : bool = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : Union[float, Iterable[float]] = None , _UpperCamelCase : bool = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[str] = None , _UpperCamelCase : Optional[Union[str, TensorType]] = None , _UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **_UpperCamelCase : List[Any] , ) -> PIL.Image.Image:
'''simple docstring'''
SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE = size if size is not None else self.size
SCREAMING_SNAKE_CASE = get_size_dict(_UpperCamelCase )
SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
SCREAMING_SNAKE_CASE = make_list_of_images(_UpperCamelCase )
if not valid_images(_UpperCamelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(_UpperCamelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = []
for image in images:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = apply_tesseract(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
words_batch.append(_UpperCamelCase )
boxes_batch.append(_UpperCamelCase )
if do_resize:
SCREAMING_SNAKE_CASE = [self.resize(image=_UpperCamelCase , size=_UpperCamelCase , resample=_UpperCamelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=_UpperCamelCase , scale=_UpperCamelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE = [self.normalize(image=_UpperCamelCase , mean=_UpperCamelCase , std=_UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(_UpperCamelCase , _UpperCamelCase ) for image in images]
SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=_UpperCamelCase )
if apply_ocr:
SCREAMING_SNAKE_CASE = words_batch
SCREAMING_SNAKE_CASE = boxes_batch
return data
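    # Box note (illustrative): normalize_box rescales pixel boxes to the 0-1000 coordinate system
    # LayoutLM-style models expect. For example, the box (10, 20, 110, 220) on a 1000x1000 image
    # stays (10, 20, 110, 220), while on a 500x500 image it becomes (20, 40, 220, 440).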
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    """simple docstring"""

    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """simple docstring"""

    return_name = 'generated'
def __init__(self , *__lowercase , **__lowercase ):
super().__init__(*__lowercase , **__lowercase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case (self , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , __lowercase=None , **__lowercase , ):
__lowerCAmelCase = {}
if truncation is not None:
__lowerCAmelCase = truncation
__lowerCAmelCase = generate_kwargs
__lowerCAmelCase = {}
if return_tensors is not None and return_type is None:
__lowerCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
__lowerCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase = self.tokenizer.encode(__lowercase , add_special_tokens=__lowercase )
if len(__lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
__lowerCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case (self , __lowercase , __lowercase , __lowercase ):
return True
def _snake_case (self , *__lowercase , __lowercase ):
__lowerCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , __lowercase ):
if self.tokenizer.pad_token_id is None:
raise ValueError('''Please make sure that the tokenizer has a pad_token_id when using a batch input''' )
__lowerCAmelCase = ([prefix + arg for arg in args[0]],)
__lowerCAmelCase = True
elif isinstance(args[0] , __lowercase ):
__lowerCAmelCase = (prefix + args[0],)
__lowerCAmelCase = False
else:
raise ValueError(
F""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
__lowerCAmelCase = self.tokenizer(*__lowercase , padding=__lowercase , truncation=__lowercase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self , *__lowercase , **__lowercase ):
__lowerCAmelCase = super().__call__(*__lowercase , **__lowercase )
if (
isinstance(args[0] , __lowercase )
and all(isinstance(__lowercase , __lowercase ) for el in args[0] )
and all(len(__lowercase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case (self , __lowercase , __lowercase=TruncationStrategy.DO_NOT_TRUNCATE , **__lowercase ):
__lowerCAmelCase = self._parse_and_tokenize(__lowercase , truncation=__lowercase , **__lowercase )
return inputs
def _snake_case (self , __lowercase , **__lowercase ):
if self.framework == "pt":
__lowerCAmelCase , __lowerCAmelCase = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
__lowerCAmelCase , __lowerCAmelCase = tf.shape(model_inputs['''input_ids'''] ).numpy()
__lowerCAmelCase = generate_kwargs.get('''min_length''' , self.model.config.min_length )
__lowerCAmelCase = generate_kwargs.get('''max_length''' , self.model.config.max_length )
self.check_inputs(__lowercase , generate_kwargs['''min_length'''] , generate_kwargs['''max_length'''] )
__lowerCAmelCase = self.model.generate(**__lowercase , **__lowercase )
__lowerCAmelCase = output_ids.shape[0]
if self.framework == "pt":
__lowerCAmelCase = output_ids.reshape(__lowercase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase = tf.reshape(__lowercase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case (self , __lowercase , __lowercase=ReturnType.TEXT , __lowercase=False ):
__lowerCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase = {F"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
__lowerCAmelCase = {
F"""{self.return_name}_text""": self.tokenizer.decode(
__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase , )
}
records.append(__lowercase )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = 'summary'
def __call__(self , *__lowercase , **__lowercase ):
return super().__call__(*__lowercase , **__lowercase )
def _snake_case (self , __lowercase , __lowercase , __lowercase ):
if max_length < min_length:
logger.warning(F"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
F"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
F"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = 'translation'
def _snake_case (self , __lowercase , __lowercase , __lowercase ):
if input_length > 0.9 * max_length:
logger.warning(
F"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def _snake_case (self , *__lowercase , __lowercase=TruncationStrategy.DO_NOT_TRUNCATE , __lowercase=None , __lowercase=None ):
if getattr(self.tokenizer , '''_build_translation_inputs''' , __lowercase ):
return self.tokenizer._build_translation_inputs(
*__lowercase , return_tensors=self.framework , truncation=__lowercase , src_lang=__lowercase , tgt_lang=__lowercase )
else:
return super()._parse_and_tokenize(*__lowercase , truncation=__lowercase )
def _snake_case (self , __lowercase=None , __lowercase=None , **__lowercase ):
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = super()._sanitize_parameters(**__lowercase )
if src_lang is not None:
__lowerCAmelCase = src_lang
if tgt_lang is not None:
__lowerCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
__lowerCAmelCase = kwargs.get('''task''' , self.task )
__lowerCAmelCase = task.split('''_''' )
if task and len(__lowercase ) == 4:
# translation, XX, to YY
__lowerCAmelCase = items[1]
__lowerCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__(self , *__lowercase , **__lowercase ):
return super().__call__(*__lowercase , **__lowercase )
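    # Task-name note (illustrative): a task string such as "translation_en_to_fr" is split on "_",
    # so items[1] and items[3] supply the source and target languages when src_lang and tgt_lang
    # are not passed explicitly.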
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class a__ ( __A ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token,
            pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 174
| 1
|
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    UniSpeechConfig,
    UniSpeechForCTC,
    UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
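
# Convert a fairseq UniSpeech checkpoint to the Transformers format: weights are renamed
# according to MAPPING below and copied into a UniSpeechForCTC / UniSpeechForPreTraining model.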
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
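# Keys that map to the top level of the HF model and should not be prefixed with "unispeech."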
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 206
|
import contextlib
from multiprocessing import Pool, RLock
from tqdm.auto import tqdm
from ..utils import experimental, logging
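
# Helpers used by `datasets` to run a map-style function over an iterable either with a
# multiprocessing Pool (the default) or through a joblib backend registered via `parallel_backend`.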
logger = logging.get_logger(__name__)


class ParallelBackendConfig:
    backend_name = None
@experimental
def parallel_map(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    if ParallelBackendConfig.backend_name is None:
        return _map_with_multiprocessing_pool(
            function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func
        )

    return _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func)
def _map_with_multiprocessing_pool(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    num_proc = num_proc if num_proc <= len(iterable) else len(iterable)
    split_kwds = []  # We organize the splits ourselve (contiguous splits)
    for index in range(num_proc):
        div = len(iterable) // num_proc
        mod = len(iterable) % num_proc
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc))

    if len(iterable) != sum(len(i[1]) for i in split_kwds):
        raise ValueError(
            f"Error dividing inputs iterable among processes. "
            f"Total number of objects {len(iterable)}, "
            f"length: {sum(len(i[1]) for i in split_kwds)}"
        )

    logger.info(
        f"Spawning {num_proc} processes for {len(iterable)} objects in slices of {[len(i[1]) for i in split_kwds]}"
    )
    initargs, initializer = None, None
    if not disable_tqdm:
        initargs, initializer = (RLock(),), tqdm.set_lock
    with Pool(num_proc, initargs=initargs, initializer=initializer) as pool:
        mapped = pool.map(single_map_nested_func, split_kwds)
    logger.info(f"Finished {num_proc} processes")
    mapped = [obj for proc_res in mapped for obj in proc_res]
    logger.info(f"Unpacked {len(mapped)} objects")
    return mapped
def _map_with_joblib(function, iterable, num_proc, types, disable_tqdm, desc, single_map_nested_func):
    # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib,
    # and it requires monkey-patching joblib internal classes which is subject to change
    import joblib

    with joblib.parallel_backend(ParallelBackendConfig.backend_name, n_jobs=num_proc):
        return joblib.Parallel()(
            joblib.delayed(single_map_nested_func)((function, obj, types, None, True, None)) for obj in iterable
        )
@experimental
@contextlib.contextmanager
def parallel_backend(backend_name: str):
    ParallelBackendConfig.backend_name = backend_name
    if backend_name == "spark":
        from joblibspark import register_spark

        register_spark()

    # TODO: call create_cache_and_write_probe if "download" in steps
    # TODO: raise NotImplementedError when Dataset.map etc is called
    try:
        yield
    finally:
        ParallelBackendConfig.backend_name = None
| 206
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : Any = {
"""openai/imagegpt-small""": """""",
"""openai/imagegpt-medium""": """""",
"""openai/imagegpt-large""": """""",
}
class __a (__snake_case ):
    model_type = "imagegpt"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
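    # ImageGPT keeps GPT-2-style hyperparameter names (n_embd, n_positions, n_layer, n_head);
    # attribute_map above exposes them under the standard Transformers attribute names.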
    def __init__(self, vocab_size=512 + 1, n_positions=32 * 32, n_embd=512, n_layer=24, n_head=8, n_inner=None,
                 activation_function="quick_gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1,
                 layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True,
                 tie_word_embeddings=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False,
                 **kwargs):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
class __a (__snake_case ):
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
] )
    def generate_dummy_inputs(
        self, preprocessor, batch_size=1, seq_length=-1, is_pair=False, framework=None, num_channels=3,
        image_width=32, image_height=32,
    ):
        """simple docstring"""
        input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
        inputs = dict(preprocessor(images=input_image, return_tensors=framework))
        return inputs
| 125
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
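
# Convert FocalNet checkpoints from the original release to the Transformers format: build a
# config from the model name, rename the state-dict keys, and verify the outputs on a COCO image.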
def lowercase ( _snake_case : Any ) ->Union[str, Any]:
"""simple docstring"""
__snake_case : Tuple = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
__snake_case : Dict = True if '''large''' in model_name or '''huge''' in model_name else False
__snake_case : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
__snake_case : Optional[int] = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__snake_case : Tuple = [3, 3, 3, 3]
__snake_case : Dict = [5, 5, 5, 5]
elif "fl4" in model_name:
__snake_case : Any = [4, 4, 4, 4]
__snake_case : List[str] = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__snake_case : Optional[int] = [3, 3, 3, 3]
if "lrf" in model_name:
__snake_case : Any = [3, 3, 3, 3]
else:
__snake_case : int = [2, 2, 2, 2]
if "tiny" in model_name:
__snake_case : str = 96
elif "small" in model_name:
__snake_case : Optional[int] = 96
elif "base" in model_name:
__snake_case : Any = 128
elif "large" in model_name:
__snake_case : Optional[Any] = 192
elif "xlarge" in model_name:
__snake_case : List[Any] = 256
elif "huge" in model_name:
__snake_case : Union[str, Any] = 352
# set label information
__snake_case : Union[str, Any] = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
__snake_case : int = '''imagenet-22k-id2label.json'''
else:
__snake_case : Optional[Any] = '''imagenet-1k-id2label.json'''
__snake_case : int = json.load(open(hf_hub_download(_snake_case , _snake_case , repo_type='''dataset''' ) , '''r''' ) )
__snake_case : Dict = {int(_snake_case ): v for k, v in idalabel.items()}
__snake_case : Optional[int] = {v: k for k, v in idalabel.items()}
__snake_case : Optional[Any] = FocalNetConfig(
embed_dim=_snake_case , depths=_snake_case , focal_levels=_snake_case , focal_windows=_snake_case , use_conv_embed=_snake_case , idalabel=_snake_case , labelaid=_snake_case , use_post_layernorm=_snake_case , use_layerscale=_snake_case , )
return config
def lowercase ( _snake_case : Dict ) ->List[Any]:
"""simple docstring"""
if "patch_embed.proj" in name:
__snake_case : Tuple = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
__snake_case : Tuple = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
__snake_case : List[Any] = '''encoder.''' + name
if "encoder.layers" in name:
__snake_case : Optional[Any] = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
__snake_case : Any = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
__snake_case : List[str] = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__snake_case : Any = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__snake_case : List[Any] = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__snake_case : int = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
__snake_case : Optional[Any] = '''layernorm.weight'''
if name == "norm.bias":
__snake_case : List[str] = '''layernorm.bias'''
if "head" in name:
__snake_case : Union[str, Any] = name.replace('''head''' , '''classifier''' )
else:
__snake_case : int = '''focalnet.''' + name
return name
def lowercase ( _snake_case : Tuple , _snake_case : Dict , _snake_case : List[str]=False ) ->Any:
"""simple docstring"""
__snake_case : List[Any] = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
__snake_case : int = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , _snake_case )
__snake_case : int = torch.hub.load_state_dict_from_url(_snake_case , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
__snake_case : str = state_dict.pop(_snake_case )
__snake_case : Tuple = val
__snake_case : Any = get_focalnet_config(_snake_case )
__snake_case : List[Any] = FocalNetForImageClassification(_snake_case )
model.eval()
# load state dict
model.load_state_dict(_snake_case )
# verify conversion
__snake_case : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
__snake_case : Any = BitImageProcessor(
do_resize=_snake_case , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=_snake_case , crop_size=224 , do_normalize=_snake_case , image_mean=_snake_case , image_std=_snake_case , )
__snake_case : List[str] = Image.open(requests.get(_snake_case , stream=_snake_case ).raw )
__snake_case : int = processor(images=_snake_case , return_tensors='''pt''' )
__snake_case : Optional[int] = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__snake_case : Optional[Any] = image_transforms(_snake_case ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , _snake_case , atol=1e-4 )
__snake_case : Tuple = model(**_snake_case )
__snake_case : str = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__snake_case : Any = torch.tensor([0.2166, -0.4368, 0.2191] )
elif model_name == "focalnet-tiny-lrf":
__snake_case : int = torch.tensor([1.1669, 0.0125, -0.1695] )
elif model_name == "focalnet-small":
__snake_case : Optional[int] = torch.tensor([0.4917, -0.0430, 0.1341] )
elif model_name == "focalnet-small-lrf":
__snake_case : List[Any] = torch.tensor([-0.2588, -0.5342, -0.2331] )
elif model_name == "focalnet-base":
__snake_case : Union[str, Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
elif model_name == "focalnet-base-lrf":
__snake_case : List[str] = torch.tensor([0.5306, -0.0483, -0.3928] )
assert torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_snake_case )
processor.save_pretrained(_snake_case )
if push_to_hub:
print(f"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(f"""{model_name}""" )
processor.push_to_hub(f"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE : str = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 102
| 0
|
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _a (self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = True , ):
"""simple docstring"""
UpperCAmelCase__ : Dict = [file for file in os.listdir(_lowerCamelCase ) if os.path.isfile(os.path.join(_lowerCamelCase , _lowerCamelCase ) )]
if identifier is not None:
UpperCAmelCase__ : List[str] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
for n_ in n_identifier:
UpperCAmelCase__ : Optional[int] = [file for file in files if n_ not in file]
else:
UpperCAmelCase__ : List[Any] = [file for file in files if n_identifier not in file]
UpperCAmelCase__ : str = ignore_files or []
ignore_files.append("""__init__.py""" )
UpperCAmelCase__ : Any = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print("""Testing""" , _lowerCamelCase )
if only_modules:
UpperCAmelCase__ : List[str] = file.split(""".""" )[0]
try:
UpperCAmelCase__ : Any = getattr(_lowerCamelCase , _lowerCamelCase )
UpperCAmelCase__ : Optional[int] = doctest.DocTestSuite(_lowerCamelCase )
UpperCAmelCase__ : List[Any] = unittest.TextTestRunner().run(_lowerCamelCase )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
UpperCAmelCase__ : Tuple = doctest.testfile(str("""..""" / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = Path("""src/transformers""" )
UpperCAmelCase__ : Union[str, Any] = """modeling"""
UpperCAmelCase__ : int = [
"""modeling_ctrl.py""",
"""modeling_tf_ctrl.py""",
]
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase , ignore_files=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = Path("""src/transformers""" )
UpperCAmelCase__ : List[Any] = """tokenization"""
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : int = Path("""src/transformers""" )
UpperCAmelCase__ : Optional[int] = """configuration"""
self.analyze_directory(_lowerCamelCase , identifier=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = Path("""src/transformers""" )
UpperCAmelCase__ : List[Any] = ["""configuration""", """modeling""", """tokenization"""]
self.analyze_directory(_lowerCamelCase , n_identifier=_lowerCamelCase )
def _a (self ):
"""simple docstring"""
UpperCAmelCase__ : str = Path("""docs/source""" )
UpperCAmelCase__ : str = ["""favicon.ico"""]
self.analyze_directory(_lowerCamelCase , ignore_files=_lowerCamelCase , only_modules=_lowerCamelCase )
| 166
|
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
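
# OwlViTProcessor wraps a CLIP tokenizer and an OWL-ViT image processor; `__call__` accepts text
# queries, query images, and/or images and returns a combined BatchEncoding.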
class lowerCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['image_processor', 'tokenizer']
SCREAMING_SNAKE_CASE = 'OwlViTImageProcessor'
SCREAMING_SNAKE_CASE = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__(self , _lowerCamelCase=None , _lowerCamelCase=None , **_lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowerCamelCase , )
UpperCAmelCase__ : Optional[int] = kwargs.pop("""feature_extractor""" )
UpperCAmelCase__ : Tuple = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowerCamelCase , _lowerCamelCase )
def __call__(self , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase="max_length" , _lowerCamelCase="np" , **_lowerCamelCase ):
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(_lowerCamelCase , _lowerCamelCase ) or (isinstance(_lowerCamelCase , _lowerCamelCase ) and not isinstance(text[0] , _lowerCamelCase )):
UpperCAmelCase__ : Any = [self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )]
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and isinstance(text[0] , _lowerCamelCase ):
UpperCAmelCase__ : Any = []
# Maximum number of queries across batch
UpperCAmelCase__ : int = max([len(_lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCamelCase ) != max_num_queries:
UpperCAmelCase__ : Optional[int] = t + [""" """] * (max_num_queries - len(_lowerCamelCase ))
UpperCAmelCase__ : Union[str, Any] = self.tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
encodings.append(_lowerCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
UpperCAmelCase__ : Optional[Any] = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Any = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
UpperCAmelCase__ : Any = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : List[str] = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
UpperCAmelCase__ : str = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
UpperCAmelCase__ : int = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
UpperCAmelCase__ : Any = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
UpperCAmelCase__ : Tuple = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
UpperCAmelCase__ : Dict = BatchEncoding()
UpperCAmelCase__ : int = input_ids
UpperCAmelCase__ : Optional[int] = attention_mask
if query_images is not None:
UpperCAmelCase__ : int = BatchEncoding()
UpperCAmelCase__ : Optional[int] = self.image_processor(
_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase ).pixel_values
UpperCAmelCase__ : List[Any] = query_pixel_values
if images is not None:
UpperCAmelCase__ : Any = self.image_processor(_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
if text is not None and images is not None:
UpperCAmelCase__ : List[str] = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
UpperCAmelCase__ : int = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCamelCase ) , tensor_type=_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_object_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.batch_decode(*_lowerCamelCase , **_lowerCamelCase )
def _a (self , *_lowerCamelCase , **_lowerCamelCase ):
"""simple docstring"""
return self.tokenizer.decode(*_lowerCamelCase , **_lowerCamelCase )
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowerCamelCase , )
return self.image_processor_class
@property
def _a (self ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowerCamelCase , )
return self.image_processor
| 166
| 1
|
import os
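
# Project Euler problem 11: find the greatest product of four adjacent numbers in the same
# direction (right, down, or either diagonal) in the 20x20 grid stored in grid.txt.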
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 71
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ :Any = logging.get_logger(__name__)
A_ :int = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __A ( a ):
"""simple docstring"""
    model_type = "vit_msn"
    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
                 hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02,
                 layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs):
        """simple docstring"""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 71
| 1
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
UpperCAmelCase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 364
|
"""simple docstring"""
from __future__ import annotations
from random import random
class lowerCAmelCase__ :
    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None
def __repr__( self : int ):
from pprint import pformat
if self.left is None and self.right is None:
return f'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Split the left subtree; the root keeps its right subtree
            left, root.left = split(root.left, value)
            return left, root
        else:
            # Split the right subtree; the root keeps its left subtree
            root.right, right = split(root.right, value)
            return root, right
def merge(left: Node | None, right: Node | None) -> Node | None:
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right
def insert(root: Node | None, value: int) -> Node | None:
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
def inorder(root: Node | None) -> None:
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)
def interact_treap(root: Node | None, args: str) -> Node | None:
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root
def main() -> None:
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("good by!")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 40
| 0
|
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
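
# Model tester that builds a small random config and dummy inputs for the BioGPT test suite below.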
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,A : Tuple ,A : str=13 ,A : List[str]=7 ,A : Dict=True ,A : Optional[Any]=True ,A : Union[str, Any]=False ,A : Optional[int]=True ,A : int=99 ,A : Any=32 ,A : int=5 ,A : Tuple=4 ,A : Optional[Any]=37 ,A : Dict="gelu" ,A : int=0.1 ,A : Optional[Any]=0.1 ,A : int=5_12 ,A : Tuple=16 ,A : Any=2 ,A : int=0.02 ,A : Optional[int]=3 ,A : str=4 ,A : Tuple=None ,):
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_input_mask
__A = use_token_type_ids
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_act
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = type_vocab_size
__A = type_sequence_label_size
__A = initializer_range
__A = num_labels
__A = num_choices
__A = scope
def UpperCamelCase_ ( self : int ):
__A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__A = None
if self.use_input_mask:
__A = random_attention_mask([self.batch_size, self.seq_length] )
__A = None
if self.use_token_type_ids:
__A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__A = None
__A = None
__A = None
if self.use_labels:
__A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__A = ids_tensor([self.batch_size] ,self.num_choices )
__A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase_ ( self : Optional[int] ):
return BioGptConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=A ,initializer_range=self.initializer_range ,)
def UpperCamelCase_ ( self : int ,A : Optional[Any] ,A : Any ,A : Any ,A : Any ,A : int ,A : List[str] ,A : List[Any] ):
__A = BioGptModel(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A )
__A = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase_ ( self : Any ,A : Tuple ,A : str ,A : Optional[Any] ,A : str ,A : Dict ,A : Dict ,A : Tuple ,A : Union[str, Any] ,A : Tuple ,):
__A = BioGptForCausalLM(config=A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A ,labels=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase_ ( self : Union[str, Any] ,A : int ,A : Dict ,A : Optional[Any] ,A : List[Any] ,A : Any ,*A : Optional[Any] ):
__A = BioGptModel(config=A )
model.to(A )
model.eval()
# create attention mask
__A = torch.ones(input_ids.shape ,dtype=torch.long ,device=A )
__A = self.seq_length // 2
__A = 0
# first forward pass
__A , __A = model(A ,attention_mask=A ).to_tuple()
# create hypothetical next token and extent to next_input_ids
__A = ids_tensor((self.batch_size, 1) ,config.vocab_size )
# change a random masked slice from input_ids
__A = ids_tensor((1,) ,A ).item() + 1
__A = ids_tensor((self.batch_size, 1) ,config.vocab_size ).squeeze(-1 )
__A = random_other_next_tokens
# append to next input_ids and attn_mask
__A = torch.cat([input_ids, next_tokens] ,dim=-1 )
__A = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) ,dtype=torch.long ,device=A )] ,dim=1 ,)
# get two different outputs
__A = model(A ,attention_mask=A )["last_hidden_state"]
__A = model(A ,past_key_values=A ,attention_mask=A )["last_hidden_state"]
# select random slice
__A = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__A = output_from_no_past[:, -1, random_slice_idx].detach()
__A = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A ,A ,atol=1E-3 ) )
def UpperCamelCase_ ( self : Dict ,A : List[str] ,A : Dict ,A : Dict ,A : Tuple ,A : Optional[int] ,*A : int ):
__A = BioGptModel(config=A ).to(A ).eval()
__A = torch.ones(input_ids.shape ,dtype=torch.long ,device=A )
# first forward pass
__A = model(A ,attention_mask=A ,use_cache=A )
__A , __A = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
__A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
__A = ids_tensor((self.batch_size, 3) ,2 )
# append to next input_ids and
__A = torch.cat([input_ids, next_tokens] ,dim=-1 )
__A = torch.cat([attention_mask, next_attn_mask] ,dim=-1 )
__A = model(A ,attention_mask=A )["last_hidden_state"]
__A = model(A ,attention_mask=A ,past_key_values=A )[
"last_hidden_state"
]
# select random slice
__A = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
__A = output_from_no_past[:, -3:, random_slice_idx].detach()
__A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A ,A ,atol=1E-3 ) )
def UpperCamelCase_ ( self : Any ,A : Union[str, Any] ,A : List[Any] ,A : str ,A : List[Any] ,A : Optional[int] ,*A : str ,A : List[Any]=False ):
__A = BioGptForCausalLM(A )
model.to(A )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__A = model(A ,labels=A )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def UpperCamelCase_ ( self : Tuple ,A : str ,*A : List[Any] ):
__A = BioGptModel(A )
__A = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) ,0.0_01 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) ,0.01 )
def UpperCamelCase_ ( self : Dict ,A : str ,A : int ,A : str ,A : List[Any] ,A : str ,*A : Optional[Any] ):
__A = self.num_labels
__A = BioGptForTokenClassification(A )
model.to(A )
model.eval()
__A = model(A ,attention_mask=A ,token_type_ids=A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase_ ( self : str ):
__A = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) = config_and_inputs
__A = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
snake_case_ = (BioGptForCausalLM,) if is_torch_available() else ()
snake_case_ = (
{
"feature-extraction": BioGptModel,
"text-classification": BioGptForSequenceClassification,
"text-generation": BioGptForCausalLM,
"token-classification": BioGptForTokenClassification,
"zero-shot": BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case_ = False
def UpperCamelCase_ ( self : int ):
__A = BioGptModelTester(self )
__A = ConfigTester(self ,config_class=A ,hidden_size=37 )
def UpperCamelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self : Union[str, Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : List[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__A = type
self.model_tester.create_and_check_model(*A )
def UpperCamelCase_ ( self : Optional[int] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*A )
def UpperCamelCase_ ( self : Dict ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*A ,gradient_checkpointing=A )
def UpperCamelCase_ ( self : Tuple ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*A )
def UpperCamelCase_ ( self : str ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*A )
@slow
    def test_batch_generation( self ):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(torch_device )
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        tokenizer.padding_side = "left"
        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id
        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]
        inputs = tokenizer(sentences ,return_tensors="pt" ,padding=True )
        input_ids = inputs["input_ids"].to(torch_device )
        outputs = model.generate(
            input_ids=input_ids ,attention_mask=inputs["attention_mask"].to(torch_device ) ,)
        inputs_non_padded = tokenizer(sentences[0] ,return_tensors="pt" ).input_ids.to(torch_device )
        output_non_padded = model.generate(input_ids=inputs_non_padded )
        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1] ,return_tensors="pt" ).input_ids.to(torch_device )
        output_padded = model.generate(input_ids=inputs_padded ,max_length=model.config.max_length - num_paddings )
        batch_out_sentence = tokenizer.batch_decode(outputs ,skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] ,skip_special_tokens=True )
        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence ,batch_out_sentence )
        self.assertListEqual(expected_output_sentence ,[non_padded_sentence, padded_sentence] )
@slow
    def test_model_from_pretrained( self ):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_biogpt_sequence_classification_model( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor([self.model_tester.batch_size] ,self.model_tester.type_sequence_label_size )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
    def test_biogpt_sequence_classification_model_for_multi_label( self ):
        config , input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] ,self.model_tester.type_sequence_label_size ).to(torch.float )
        model = BioGptForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids ,attention_mask=attention_mask ,labels=sequence_labels )
        self.assertEqual(result.logits.shape ,(self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_biogpt_lm_head_model( self ):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        input_ids = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        output = model(input_ids )[0]
        vocab_size = 4_23_84
        expected_shape = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape ,expected_shape )
        expected_slice = torch.tensor(
            [[[-9.52_36, -9.89_18, 10.45_57], [-11.04_69, -9.64_23, 8.10_22], [-8.86_64, -7.88_26, 5.53_25]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] ,expected_slice ,atol=1E-4 ) )
@slow
    def test_biogpt_generation( self ):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt" )
        model.to(torch_device )
        torch.manual_seed(0 )
        tokenized = tokenizer("COVID-19 is" ,return_tensors="pt" ).to(torch_device )
        output_ids = model.generate(
            **tokenized ,min_length=1_00 ,max_length=10_24 ,num_beams=5 ,early_stopping=True ,)
        output_str = tokenizer.decode(output_ids[0] ,skip_special_tokens=True )
        expected_output_str = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str ,expected_output_str )
| 15
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowerCAmelCase :
def __init__(self , lowercase , lowercase=13 , lowercase=3 , lowercase=True , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=224 , lowercase=1000 , lowercase=[3, 3, 6, 4] , lowercase=[48, 56, 112, 220] , ):
A_ : Dict = parent
A_ : List[Any] = batch_size
A_ : Dict = num_channels
A_ : Optional[Any] = is_training
A_ : List[str] = use_labels
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Tuple = num_labels
A_ : List[str] = image_size
A_ : str = layer_depths
A_ : Optional[int] = embed_dims
def _a (self ):
A_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : int = None
if self.use_labels:
A_ : Tuple = ids_tensor([self.batch_size] , self.num_labels )
A_ : int = self.get_config()
return config, pixel_values, labels
def _a (self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase , layer_scale_init_value=1E-5 , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = SwiftFormerModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = self.num_labels
A_ : Any = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A_ : int = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self ):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
def _a (self ):
A_ : Optional[int] = SwiftFormerModelTester(self )
A_ : Any = ConfigTester(
self , config_class=lowercase , has_text_modality=lowercase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(lowercase )
A_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
def _a (self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[Any] = SwiftFormerModel.from_pretrained(lowercase )
self.assertIsNotNone(lowercase )
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a (self ):
pass
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : str = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Any = outputs.hidden_states
A_ : Any = 8
self.assertEqual(len(lowercase ) , lowercase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowercase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A_, A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
def _a (self ):
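        # Shrink every initializer range to ~0 so that any parameter whose mean is not 0 or 1 after init signals a bad default.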
def _config_zero_init(lowercase ):
A_ : Optional[Any] = copy.deepcopy(lowercase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowercase , lowercase , 1E-10 )
if isinstance(getattr(lowercase , lowercase , lowercase ) , lowercase ):
A_ : Any = _config_zero_init(getattr(lowercase , lowercase ) )
setattr(lowercase , lowercase , lowercase )
return configs_no_init
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
A_ : Any = _config_zero_init(lowercase )
for model_class in self.all_model_classes:
A_ : List[str] = model_class(config=lowercase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def prepare_img ( ):
    '''simple docstring'''
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@cached_property
def _a (self ):
return ViTImageProcessor.from_pretrained("""MBZUAI/swiftformer-xs""" ) if is_vision_available() else None
@slow
def _a (self ):
A_ : Any = SwiftFormerForImageClassification.from_pretrained("""MBZUAI/swiftformer-xs""" ).to(lowercase )
A_ : Dict = self.default_image_processor
A_ : Dict = prepare_img()
A_ : int = image_processor(images=lowercase , return_tensors="""pt""" ).to(lowercase )
# forward pass
with torch.no_grad():
A_ : int = model(**lowercase )
# verify the logits
A_ : int = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase )
A_ : List[str] = torch.tensor([[-2.1_703E00, 2.1_107E00, -2.0_811E00]] ).to(lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase , atol=1E-4 ) )
| 206
| 0
|
'''simple docstring'''
from __future__ import annotations
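# Letter-to-number cipher: each lowercase letter maps to its position in the alphabet (a=1 ... z=26).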
def encode ( plain : str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded : list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ) -> None:
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
| 367
|
'''simple docstring'''
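# Neville's iterated interpolation: builds the q-table over (x_points, y_points) and evaluates the interpolating polynomial at xa.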
def __lowerCamelCase ( x_points : list , y_points : list , xa : int ) -> list:
    n = len(x_points )
    q = [[0] * n for i in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 3
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class _lowerCAmelCase ( __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE : Any = ['image_processor', 'tokenizer']
__SCREAMING_SNAKE_CASE : Union[str, Any] = 'BlipImageProcessor'
__SCREAMING_SNAKE_CASE : List[Any] = ('BertTokenizer', 'BertTokenizerFast')
def __init__(self , lowercase , lowercase ):
A_ : List[Any] = False
super().__init__(lowercase , lowercase )
A_ : Tuple = self.image_processor
def __call__(self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None:
A_ : Optional[Any] = self.tokenizer
A_ : Tuple = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
A_ : int = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
A_ : Optional[Any] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
A_ : List[str] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def _a (self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def _a (self ):
A_ : int = self.tokenizer.model_input_names
A_ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 206
|
'''simple docstring'''
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
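# Build a VideoMAEConfig for the given checkpoint name: architecture sizes, plus label mappings for fine-tuned variants.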
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = VideoMAEConfig()
set_architecture_configs(lowerCamelCase__ , lowerCamelCase__ )
if "finetuned" not in model_name:
A_ : Dict = False
if "finetuned" in model_name:
A_ : List[Any] = """huggingface/label-files"""
if "kinetics" in model_name:
A_ : Dict = 4_00
A_ : List[str] = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
A_ : Tuple = 1_74
A_ : str = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase__ , lowerCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
A_ : List[str] = {int(lowerCamelCase__ ): v for k, v in idalabel.items()}
A_ : Optional[Any] = idalabel
A_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
return config
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if "small" in model_name:
A_ : int = 3_84
A_ : Union[str, Any] = 15_36
A_ : List[str] = 12
A_ : Optional[int] = 16
A_ : Any = 12
A_ : int = 3
A_ : Optional[Any] = 1_92
A_ : Union[str, Any] = 7_68
elif "large" in model_name:
A_ : List[Any] = 10_24
A_ : Optional[Any] = 40_96
A_ : Optional[Any] = 24
A_ : List[str] = 16
A_ : Any = 12
A_ : str = 8
A_ : str = 5_12
A_ : int = 20_48
elif "huge" in model_name:
A_ : Optional[Any] = 12_80
A_ : str = 51_20
A_ : str = 32
A_ : int = 16
A_ : Any = 12
A_ : Union[str, Any] = 8
A_ : Dict = 6_40
A_ : Optional[Any] = 25_60
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
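# Map a key from the original VideoMAE checkpoint to the corresponding Hugging Face parameter name.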
def a ( lowerCamelCase__ ):
'''simple docstring'''
if "encoder." in name:
A_ : List[Any] = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
A_ : List[str] = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
A_ : Tuple = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
A_ : int = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
A_ : Optional[Any] = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
A_ : Dict = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
A_ : List[str] = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
A_ : List[str] = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
A_ : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
A_ : str = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
A_ : Union[str, Any] = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
A_ : Any = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
A_ : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
A_ : Dict = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
A_ : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
A_ : Optional[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
A_ : Tuple = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
A_ : Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
A_ : Dict = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
A_ : List[str] = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
A_ : Optional[Any] = name.replace("""head""" , """classifier""" )
return name
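# Rewrite the original state dict, splitting each fused qkv projection into separate query/key/value weights.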
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A_ : str = orig_state_dict.pop(lowerCamelCase__ )
if key.startswith("""encoder.""" ):
A_ : Tuple = key.replace("""encoder.""" , """""" )
if "qkv" in key:
A_ : Optional[int] = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
A_ : Union[str, Any] = config.decoder_hidden_size
A_ : Any = int(key_split[2] )
A_ : int = """decoder.decoder_layers."""
if "weight" in key:
A_ : Optional[Any] = val[:dim, :]
A_ : Any = val[dim : dim * 2, :]
A_ : Dict = val[-dim:, :]
else:
A_ : List[Any] = config.hidden_size
A_ : List[Any] = int(key_split[1] )
A_ : int = """videomae.encoder.layer."""
if "weight" in key:
A_ : Any = val[:dim, :]
A_ : Union[str, Any] = val[dim : dim * 2, :]
A_ : List[str] = val[-dim:, :]
else:
A_ : Union[str, Any] = val
return orig_state_dict
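# Load the sample "eating spaghetti" clip from the Hub as a list of frames for a quick forward-pass check.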
def prepare_video ( ):
    '''simple docstring'''
    file = hf_hub_download(
        repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
    video = np.load(file )
    return list(video )
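# End-to-end conversion: load the original weights, verify logits (and loss where applicable) on the sample video, then optionally save and push to the Hub.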
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Any = get_videomae_config(lowerCamelCase__ )
if "finetuned" in model_name:
A_ : List[str] = VideoMAEForVideoClassification(lowerCamelCase__ )
else:
A_ : Optional[Any] = VideoMAEForPreTraining(lowerCamelCase__ )
# download original checkpoint, hosted on Google Drive
A_ : Optional[Any] = """pytorch_model.bin"""
gdown.cached_download(lowerCamelCase__ , lowerCamelCase__ , quiet=lowerCamelCase__ )
A_ : Any = torch.load(lowerCamelCase__ , map_location="""cpu""" )
if "model" in files:
A_ : Any = files["""model"""]
else:
A_ : Dict = files["""module"""]
A_ : Any = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ )
model.load_state_dict(lowerCamelCase__ )
model.eval()
# verify model on basic input
A_ : int = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
A_ : Union[str, Any] = prepare_video()
A_ : str = image_processor(lowerCamelCase__ , return_tensors="""pt""" )
if "finetuned" not in model_name:
A_ : List[str] = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
A_ : Optional[Any] = torch.load(lowerCamelCase__ )
A_ : Dict = model(**lowerCamelCase__ )
A_ : List[Any] = outputs.logits
A_ : Any = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A_ : str = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([-0.9_291, -0.4_061, -0.9_307] )
elif model_name == "videomae-small-finetuned-ssv2":
A_ : str = torch.Size([1, 1_74] )
A_ : Union[str, Any] = torch.tensor([0.2_671, -0.4_689, -0.8_235] )
elif model_name == "videomae-base":
A_ : Tuple = torch.Size([1, 14_08, 15_36] )
A_ : List[str] = torch.tensor([[0.7_739, 0.7_968, 0.7_089], [0.6_701, 0.7_487, 0.6_209], [0.4_287, 0.5_158, 0.4_773]] )
elif model_name == "videomae-base-short":
A_ : Dict = torch.Size([1, 14_08, 15_36] )
A_ : List[str] = torch.tensor([[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]] )
# we verified the loss both for normalized and unnormalized targets for this one
A_ : List[Any] = torch.tensor([0.5_142] ) if config.norm_pix_loss else torch.tensor([0.6_469] )
elif model_name == "videomae-large":
A_ : str = torch.Size([1, 14_08, 15_36] )
A_ : Dict = torch.tensor([[0.7_149, 0.7_997, 0.6_966], [0.6_768, 0.7_869, 0.6_948], [0.5_139, 0.6_221, 0.5_605]] )
elif model_name == "videomae-large-finetuned-kinetics":
A_ : int = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([0.0_771, 0.0_011, -0.3_625] )
elif model_name == "videomae-huge-finetuned-kinetics":
A_ : Union[str, Any] = torch.Size([1, 4_00] )
A_ : Optional[int] = torch.tensor([0.2_433, 0.1_632, -0.4_894] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A_ : List[Any] = torch.Size([1, 4_00] )
A_ : Optional[Any] = torch.tensor([0.6_588, 0.0_990, -0.2_493] )
elif model_name == "videomae-base-finetuned-kinetics":
A_ : Union[str, Any] = torch.Size([1, 4_00] )
A_ : Tuple = torch.tensor([0.3_669, -0.0_688, -0.2_421] )
elif model_name == "videomae-base-short-ssv2":
A_ : Optional[Any] = torch.Size([1, 14_08, 15_36] )
A_ : List[Any] = torch.tensor([[0.4_712, 0.5_296, 0.5_786], [0.2_278, 0.2_729, 0.4_026], [0.0_352, 0.0_730, 0.2_506]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A_ : Any = torch.Size([1, 1_74] )
A_ : Any = torch.tensor([-0.0_537, -0.1_539, -0.3_266] )
elif model_name == "videomae-base-ssv2":
A_ : Dict = torch.Size([1, 14_08, 15_36] )
A_ : Dict = torch.tensor([[0.8_131, 0.8_727, 0.8_546], [0.7_366, 0.9_377, 0.8_870], [0.5_935, 0.8_874, 0.8_564]] )
elif model_name == "videomae-base-finetuned-ssv2":
A_ : Any = torch.Size([1, 1_74] )
A_ : str = torch.tensor([0.1_961, -0.8_337, -0.6_389] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , lowerCamelCase__ , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase__ , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
A_ : Optional[int] = outputs.loss
assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCamelCase__ )
model.save_pretrained(lowerCamelCase__ )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(lowerCamelCase__ , organization="""nielsr""" )
if __name__ == "__main__":
lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4''',
type=str,
help=(
'''URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'''
''' download link.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/Users/nielsrogge/Documents/VideoMAE/Test''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--model_name''', default='''videomae-base''', type=str, help='''Name of the model.''')
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCamelCase :Union[str, Any] = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 206
| 1
|
'''simple docstring'''
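# Greedy selection of items under a maximum cost, ranking candidates with a caller-supplied key function.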
class Things :
    def __init__( self , name : str , value : float , weight : float ) -> None:
        """simple docstring"""
        self.name = name
        self.value = value
        self.weight = weight
    def __repr__( self ) -> str:
        """simple docstring"""
        return F'{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'
    def get_value( self ) -> float:
        """simple docstring"""
        return self.value
    def get_name( self ) -> str:
        """simple docstring"""
        return self.name
    def get_weight( self ) -> float:
        """simple docstring"""
        return self.weight
    def value_weight( self ) -> float:
        """simple docstring"""
        return self.value / self.weight
def build_menu (name : list , value : list , weight : list ):
    '''simple docstring'''
    menu = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] , value[i] , weight[i] ) )
    return menu
def greedy (item : list , max_cost : float , key_func ):
    '''simple docstring'''
    items_copy = sorted(item , key=key_func , reverse=True )
    result = []
    total_value , total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy ():
    '''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
| 179
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A :
def __init__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Optional[Any]=10 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Union[str, Any]=0.9 , lowerCAmelCase_ : str=None , ) -> int:
"""simple docstring"""
_a = parent
_a = batch_size
_a = image_size
_a = num_channels
_a = patch_size
_a = tubelet_size
_a = num_frames
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = mask_ratio
_a = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
_a = (image_size // patch_size) ** 2
_a = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
_a = int(mask_ratio * self.seq_length )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
_a = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def __lowerCAmelCase ( self : List[str] ) -> Any:
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
_a = VideoMAEModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_a = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = VideoMAEForPreTraining(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_a = torch.ones((self.num_masks,) )
_a = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
_a = mask.expand(self.batch_size , -1 ).bool()
_a = model(lowerCAmelCase_ , lowerCAmelCase_ )
# model only returns predictions for masked patches
_a = mask.sum().item()
_a = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class A ( _a ,_a ,unittest.TestCase ):
lowercase_ = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase_ = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ = False
lowercase_ = False
lowercase_ = False
lowercase_ = False
def __lowerCAmelCase ( self : Dict ) -> int:
"""simple docstring"""
_a = VideoMAEModelTester(self )
_a = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 )
def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False ) -> Tuple:
"""simple docstring"""
_a = copy.deepcopy(lowerCAmelCase_ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
_a = torch.ones((self.model_tester.num_masks,) )
_a = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
_a = mask.expand(self.model_tester.batch_size , -1 ).bool()
_a = bool_masked_pos.to(lowerCAmelCase_ )
if return_labels:
if model_class in [
*get_values(lowerCAmelCase_ ),
]:
_a = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ )
return inputs_dict
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
pass
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) )
def __lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(lowerCAmelCase_ )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ )
@slow
def __lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = VideoMAEModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
def __lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
if not self.has_attentions:
pass
else:
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
for model_class in self.all_model_classes:
_a = self.model_tester.seq_length - self.model_tester.num_masks
_a = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
_a = True
_a = False
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_a = len(lowerCAmelCase_ )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) )
_a = outputs.attentions
self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
def check_hidden_states_output(lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ):
_a = model_class(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) )
_a = outputs.hidden_states
_a = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
_a = self.model_tester.seq_length - self.model_tester.num_masks
_a = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
pass
def prepare_video ():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def __lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_a = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
lowerCAmelCase_ )
_a = self.default_image_processor
_a = prepare_video()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
# verify the logits
_a = torch.Size((1, 4_00) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
_a = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) )
@slow
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
_a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCAmelCase_ )
_a = self.default_image_processor
_a = prepare_video()
_a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ )
# add boolean mask, indicating which patches to mask
_a = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' )
_a = torch.load(lowerCAmelCase_ )
# forward pass
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
# verify the logits
_a = torch.Size([1, 14_08, 15_36] )
_a = torch.tensor(
[[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase_ )
self.assertEqual(outputs.logits.shape , lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
_a = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
_a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowerCAmelCase_ ).to(
lowerCAmelCase_ )
with torch.no_grad():
_a = model(**lowerCAmelCase_ )
_a = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=lowerCAmelCase_ )
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
| 179
| 1
|
'''simple docstring'''
from collections import defaultdict
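# Subtree-size DFS: edges above even-sized subtrees are collected as candidate cuts; the driver prints how many edges can be removed while keeping all components even.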
def dfs ( start ):
    """simple docstring"""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree ( ):
    """simple docstring"""
    dfs(1 )
if __name__ == "__main__":
    n , m = 10, 9
    tree = defaultdict(list)
    visited = {}
    cuts = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 166
|
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = ["""image_processor""", """tokenizer"""]
lowerCAmelCase__ = """OwlViTImageProcessor"""
lowerCAmelCase__ = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : List[str] , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : int=None , **_lowerCAmelCase : Any):
'''simple docstring'''
__lowercase =None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _lowerCAmelCase , )
__lowercase =kwargs.pop('feature_extractor')
__lowercase =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(_lowerCAmelCase , _lowerCAmelCase)
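    # Tokenize text queries (padding every sample to the batch's max number of queries) and/or preprocess images.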
def __call__( self : Dict , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : str=None , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : List[Any]="max_length" , _lowerCAmelCase : Optional[Any]="np" , **_lowerCAmelCase : Any):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.')
if text is not None:
if isinstance(_lowerCAmelCase , _lowerCAmelCase) or (isinstance(_lowerCAmelCase , _lowerCAmelCase) and not isinstance(text[0] , _lowerCAmelCase)):
__lowercase =[self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)]
elif isinstance(_lowerCAmelCase , _lowerCAmelCase) and isinstance(text[0] , _lowerCAmelCase):
__lowercase =[]
# Maximum number of queries across batch
__lowercase =max([len(_lowerCAmelCase) for t in text])
# Pad all batch samples to max number of text queries
for t in text:
if len(_lowerCAmelCase) != max_num_queries:
__lowercase =t + [' '] * (max_num_queries - len(_lowerCAmelCase))
__lowercase =self.tokenizer(_lowerCAmelCase , padding=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
encodings.append(_lowerCAmelCase)
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings')
if return_tensors == "np":
__lowercase =np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__lowercase =jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0)
elif return_tensors == "pt" and is_torch_available():
import torch
__lowercase =torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0)
__lowercase =torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0)
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__lowercase =tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0)
__lowercase =tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0)
else:
raise ValueError('Target return tensor type could not be returned')
__lowercase =BatchEncoding()
__lowercase =input_ids
__lowercase =attention_mask
if query_images is not None:
__lowercase =BatchEncoding()
__lowercase =self.image_processor(
_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase).pixel_values
__lowercase =query_pixel_values
if images is not None:
__lowercase =self.image_processor(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase)
if text is not None and images is not None:
__lowercase =image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__lowercase =image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase) , tensor_type=_lowerCAmelCase)
def __lowerCamelCase ( self : int , *_lowerCAmelCase : Tuple , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[Any] , *_lowerCAmelCase : Optional[Any] , **_lowerCAmelCase : List[Any]):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any] , *_lowerCAmelCase : List[Any] , **_lowerCAmelCase : Any):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : List[str] , *_lowerCAmelCase : Optional[int] , **_lowerCAmelCase : Optional[Any]):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase)
def __lowerCamelCase ( self : str , *_lowerCAmelCase : int , **_lowerCAmelCase : Dict):
'''simple docstring'''
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase)
@property
def __lowerCamelCase ( self : Optional[Any]):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _lowerCAmelCase , )
return self.image_processor_class
@property
def __lowerCamelCase ( self : Any):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _lowerCAmelCase , )
return self.image_processor
| 166
| 1
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "bloom"
snake_case = ["past_key_values"]
snake_case = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , _SCREAMING_SNAKE_CASE=25_0880 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=1e-5 , _SCREAMING_SNAKE_CASE=0.0_2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , )->List[str]:
'''simple docstring'''
A_ : List[Any] = vocab_size
# Backward compatibility with n_embed kwarg
A_ : Tuple = kwargs.pop('''n_embed''' , _SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = hidden_size if n_embed is None else n_embed
A_ : List[Any] = n_layer
A_ : Optional[Any] = n_head
A_ : List[str] = layer_norm_epsilon
A_ : int = initializer_range
A_ : Any = use_cache
A_ : Optional[int] = pretraining_tp
A_ : Any = apply_residual_connection_post_layernorm
A_ : Any = hidden_dropout
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = bos_token_id
A_ : Optional[int] = eos_token_id
A_ : Union[str, Any] = slow_but_exact
super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = version.parse("1.12" )
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "default" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , )->int:
'''simple docstring'''
super().__init__(_SCREAMING_SNAKE_CASE , task=_SCREAMING_SNAKE_CASE , patching_specs=_SCREAMING_SNAKE_CASE , use_past=_SCREAMING_SNAKE_CASE )
if not getattr(self._config , '''pad_token_id''' , _SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
A_ : Optional[Any] = 0
@property
def _snake_case ( self )->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
A_ : List[str] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='''inputs''' , inverted_values_shape=_SCREAMING_SNAKE_CASE )
A_ : int = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
A_ : List[Any] = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def _snake_case ( self )->int:
'''simple docstring'''
return self._config.n_layer
@property
def _snake_case ( self )->int:
'''simple docstring'''
return self._config.n_head
@property
def _snake_case ( self )->float:
'''simple docstring'''
return 1e-3
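    # Dummy inputs for ONNX export; when use_past is enabled, zero-filled past_key_values tensors are appended and the attention mask is extended.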
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , )->Mapping[str, Any]:
'''simple docstring'''
A_ : List[str] = super(_SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
# We need to order the input in the way they appears in the forward()
A_ : List[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
A_ , A_ : List[Any] = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
A_ : Any = seqlen + 2
A_ : Union[str, Any] = self._config.hidden_size // self.num_attention_heads
A_ : Any = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
A_ : Dict = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
A_ : List[str] = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
A_ : Union[str, Any] = common_inputs['''attention_mask''']
if self.use_past:
A_ : str = ordered_inputs['''attention_mask'''].dtype
A_ : Tuple = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def _snake_case ( self )->int:
'''simple docstring'''
return 13
| 65
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class _lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self )->int:
'''simple docstring'''
A_ : List[str] = tempfile.mkdtemp()
A_ : Optional[Any] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
A_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
A_ : Union[str, Any] = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
A_ : Tuple = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Any:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Tuple:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , **_SCREAMING_SNAKE_CASE )->Dict:
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->List[str]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self )->int:
'''simple docstring'''
        A_ : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Optional[int] = self.get_tokenizer()
A_ : int = self.get_rust_tokenizer()
A_ : Optional[int] = self.get_image_processor()
A_ : Optional[Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
A_ : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
A_ : List[str] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
A_ : int = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A_ : int = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
A_ : Any = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
A_ : Optional[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : str = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : Optional[Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Tuple = self.prepare_image_inputs()
A_ : Optional[Any] = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
A_ : Union[str, Any] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Union[str, Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : List[str] = '''lower newer'''
A_ : Tuple = processor(text=_SCREAMING_SNAKE_CASE )
A_ : int = tokenizer(_SCREAMING_SNAKE_CASE , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self )->List[Any]:
'''simple docstring'''
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : List[str] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = '''lower newer'''
A_ : List[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : List[Any] = self.get_image_processor()
A_ : Optional[int] = self.get_tokenizer()
A_ : Any = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Tuple = processor.batch_decode(_SCREAMING_SNAKE_CASE )
A_ : int = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Dict = self.get_image_processor()
A_ : Tuple = self.get_tokenizer()
A_ : Optional[Any] = AlignProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
A_ : int = '''lower newer'''
A_ : Dict = self.prepare_image_inputs()
A_ : List[Any] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 65
| 1
|
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch ( tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ) ->None:
    """Convert a TensorFlow ALBERT checkpoint into a PyTorch model and save it."""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
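# A hypothetical command-line invocation of this conversion script (the script name and all
# paths below are placeholders, not from the source; only the flags mirror the argparse setup above):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./albert_base/model.ckpt-best \
#     --albert_config_file ./albert_base/albert_config.json \
#     --pytorch_dump_path ./albert_base/pytorch_model.bin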
| 102
|
"""simple docstring"""
import os
import sys
import unittest
__lowercase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__lowercase = os.path.join(git_repo_path, """src""", """diffusers""")
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Any):
a : List[Any] = find_backend(" if not is_torch_available():")
self.assertEqual(__UpperCAmelCase , "torch")
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
a : Dict = find_backend(" if not (is_torch_available() and is_transformers_available()):")
self.assertEqual(__UpperCAmelCase , "torch_and_transformers")
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
a : int = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):")
self.assertEqual(__UpperCAmelCase , "torch_and_transformers_and_onnx")
def __snake_case ( self : Union[str, Any]):
a : Dict = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , __UpperCAmelCase)
self.assertIn("torch_and_transformers" , __UpperCAmelCase)
self.assertIn("flax_and_transformers" , __UpperCAmelCase)
self.assertIn("torch_and_transformers_and_onnx" , __UpperCAmelCase)
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"])
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"])
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"])
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"])
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"])
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"])
def __snake_case ( self : Tuple):
a : Optional[int] = create_dummy_object("CONSTANT" , "'torch'")
self.assertEqual(__UpperCAmelCase , "\nCONSTANT = None\n")
a : Dict = create_dummy_object("function" , "'torch'")
self.assertEqual(
__UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n")
a : Optional[Any] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
a : int = create_dummy_object("FakeClass" , "'torch'")
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : List[str]):
a : List[str] = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
a : Tuple = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
self.assertEqual(dummy_files["torch"] , __UpperCAmelCase)
| 40
| 0
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class snake_case__ ( _lowerCAmelCase ):
lowercase__ : Optional[int] = (IPNDMScheduler,)
lowercase__ : List[str] = (('''num_inference_steps''', 50),)
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Union[str, Any]:
__magic_name__ : List[Any] = {"""num_train_timesteps""": 10_00}
config.update(**lowerCAmelCase__ )
return config
def __magic_name__ ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Optional[int]:
__magic_name__ : Optional[int] = dict(self.forward_default_kwargs )
__magic_name__ : Optional[Any] = kwargs.pop("""num_inference_steps""" , lowerCAmelCase__ )
__magic_name__ : List[str] = self.dummy_sample
__magic_name__ : List[Any] = 0.1 * sample
__magic_name__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__magic_name__ : Any = self.get_scheduler_config(**lowerCAmelCase__ )
__magic_name__ : Any = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
__magic_name__ : Optional[int] = dummy_past_residuals[:]
if time_step is None:
__magic_name__ : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
__magic_name__ : Optional[int] = scheduler_class.from_pretrained(lowerCAmelCase__ )
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals
__magic_name__ : Optional[Any] = dummy_past_residuals[:]
__magic_name__ : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : Optional[int] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__magic_name__ : List[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : List[Any] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __magic_name__ ( self ) -> List[Any]:
pass
def __magic_name__ ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Tuple:
__magic_name__ : str = dict(self.forward_default_kwargs )
__magic_name__ : int = kwargs.pop("""num_inference_steps""" , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.dummy_sample
__magic_name__ : Dict = 0.1 * sample
__magic_name__ : Any = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
__magic_name__ : List[Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**lowerCAmelCase__ )
scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
__magic_name__ : str = dummy_past_residuals[:]
if time_step is None:
__magic_name__ : Optional[Any] = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = scheduler_class.from_pretrained(lowerCAmelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCAmelCase__ )
# copy over dummy past residual (must be after setting timesteps)
__magic_name__ : List[Any] = dummy_past_residuals[:]
__magic_name__ : Optional[int] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : Union[str, Any] = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
__magic_name__ : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : Tuple = new_scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __magic_name__ ( self , **lowerCAmelCase__ ) -> Tuple:
__magic_name__ : Optional[int] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config(**lowerCAmelCase__ )
__magic_name__ : str = scheduler_class(**lowerCAmelCase__ )
__magic_name__ : str = 10
__magic_name__ : List[str] = self.dummy_model()
__magic_name__ : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCAmelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : Union[str, Any] = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Optional[Any] = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
__magic_name__ : int = model(lowerCAmelCase__ , lowerCAmelCase__ )
__magic_name__ : Tuple = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
return sample
def __magic_name__ ( self ) -> Dict:
__magic_name__ : Tuple = dict(self.forward_default_kwargs )
__magic_name__ : Tuple = kwargs.pop("""num_inference_steps""" , lowerCAmelCase__ )
for scheduler_class in self.scheduler_classes:
__magic_name__ : Optional[Any] = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**lowerCAmelCase__ )
__magic_name__ : int = self.dummy_sample
__magic_name__ : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCAmelCase__ , """set_timesteps""" ):
scheduler.set_timesteps(lowerCAmelCase__ )
elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , """set_timesteps""" ):
__magic_name__ : List[str] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__magic_name__ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
__magic_name__ : List[str] = dummy_past_residuals[:]
__magic_name__ : int = scheduler.timesteps[5]
__magic_name__ : str = scheduler.timesteps[6]
__magic_name__ : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
__magic_name__ : Any = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
__magic_name__ : str = scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __magic_name__ ( self ) -> Optional[Any]:
for timesteps in [1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase__ , time_step=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Optional[int]:
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
self.check_over_forward(num_inference_steps=lowerCAmelCase__ , time_step=lowerCAmelCase__ )
def __magic_name__ ( self ) -> Union[str, Any]:
__magic_name__ : int = self.full_loop()
__magic_name__ : Optional[int] = torch.mean(torch.abs(lowerCAmelCase__ ) )
assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 369
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__: List[Any] = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: List[str] = ["ConditionalDetrFeatureExtractor"]
__magic_name__: int = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__: List[Any] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
__magic_name__: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138
| 0
|
"""simple docstring"""
def _snake_case ( string_a : str , string_b : str ) -> int:
    '''Calculate the Hamming distance between two equal-length strings.'''
    if len(string_a ) != len(string_b ):
        raise ValueError("""String lengths must match!""" )
    count : int = 0
    for char_a, char_b in zip(string_a , string_b ):
        if char_a != char_b:
            count += 1
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
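# A minimal usage sketch for the Hamming-distance helper above (the sample strings and
# expected counts are illustrative, not from the source):
#   _snake_case("karolin", "kathrin")  # -> 3
#   _snake_case("00000", "11111")      # -> 5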
| 84
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowercase : Dict = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
A : Union[str, Any] = os.path.abspath(snake_case__ )
logger.info(F'Loading PyTorch weights from {pt_path}' )
A : Any = torch.load(snake_case__ , map_location='''cpu''' )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
A : List[str] = convert_pytorch_state_dict_to_flax(snake_case__ , snake_case__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
A : Any = convert_pytorch_sharded_state_dict_to_flax(snake_case__ , snake_case__ )
return flax_state_dict
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(snake_case__ ) -> bool:
return len(set(snake_case__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
A : Union[str, Any] = pt_tuple_key[:-1] + ('''scale''',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
A : Tuple = pt_tuple_key[:-1] + ('''mean''',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
A : Dict = pt_tuple_key[:-1] + ('''var''',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
A : Any = pt_tuple_key[:-1] + ('''embedding''',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(snake_case__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
A : Optional[Any] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
A : Optional[int] = pt_tuple_key[:-1] + ('''kernel''',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(snake_case__ ):
A : str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A : Dict = pt_tuple_key[:-1] + ('''weight''',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A : List[Any] = pt_tuple_key[:-1] + ('''bias''',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
A : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
A : List[Any] = pt_tuple_key[-2] + '''_g'''
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
A : List[str] = pt_tuple_key[-2] + '''_v'''
if name is not None:
A : int = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
A : int = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
A : List[str] = flax_model.params['''params''']
else:
A : Dict = flax_model.params
A : List[Any] = flatten_dict(snake_case__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : List[str] = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(snake_case__ )
A : int = {}
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : int = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Any = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Dict = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : Any = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
A : Tuple = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : List[str] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
import torch
# Load the index
A : Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
A : List[str] = torch.load(snake_case__ )
A : int = {k: v.numpy() for k, v in pt_state_dict.items()}
A : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
A : Optional[int] = flax_model.params['''params''']
A : List[Any] = flatten_dict(snake_case__ )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
A : Dict = flax_model.params
A : Tuple = flatten_dict(snake_case__ )
A : List[str] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
A : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A : int = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
A : List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
A : Union[str, Any] = pt_tuple_key[1:]
# Correctly rename weight parameters
A, A : Any = rename_key_and_reshape_tensor(
snake_case__ , snake_case__ , snake_case__ , snake_case__ )
# add model prefix if necessary
A : int = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
A : int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
if "var" in flax_key[-1]:
A : Optional[int] = jnp.asarray(snake_case__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(snake_case__ , snake_case__ )
continue
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
else:
# also add unexpected weight so that warning is thrown
A : Optional[Any] = jnp.asarray(snake_case__ )
return unflatten_dict(snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
A : Dict = os.path.abspath(snake_case__ )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
A : List[str] = getattr(snake_case__ , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(snake_case__ , '''rb''' ) as state_f:
try:
A : int = from_bytes(snake_case__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
    A : List[str] = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , snake_case__ ) ).values()
if any(snake_case__ ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
A : Optional[Any] = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , snake_case__ )
A : Union[str, Any] = flatten_dict(snake_case__ )
A : List[Any] = pt_model.state_dict()
A : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
A : Tuple = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
A : int = []
A : Any = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
A : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
A : int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
A : List[str] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
A : Optional[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(snake_case__ ) not in pt_model_dict:
# conv layer
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
A : Optional[int] = jnp.transpose(snake_case__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(snake_case__ ) not in pt_model_dict:
# linear layer
A : Tuple = flax_key_tuple[:-1] + ('''weight''',)
A : Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
A : Optional[int] = flax_key_tuple[:-1] + ('''weight''',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_mean''',)
elif "var" in flax_key_tuple[-1]:
A : Tuple = flax_key_tuple[:-1] + ('''running_var''',)
if "batch_stats" in flax_state:
A : List[Any] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
A : Union[str, Any] = '''.'''.join(snake_case__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
A : int = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
A : Optional[int] = key.split('''.''' )
A : Dict = None
if key_components[-3::2] == ["parametrizations", "original0"]:
A : List[str] = key_components[-2] + '''_g'''
elif key_components[-3::2] == ["parametrizations", "original1"]:
A : List[Any] = key_components[-2] + '''_v'''
if name is not None:
A : str = key_components[:-3] + [name]
A : Optional[Any] = '''.'''.join(snake_case__ )
A : Optional[Any] = key
if flax_key in special_pt_names:
A : Optional[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
A : Dict = np.asarray(snake_case__ ) if not isinstance(snake_case__ , np.ndarray ) else flax_tensor
A : Dict = torch.from_numpy(snake_case__ )
# remove from missing keys
missing_keys.remove(snake_case__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(snake_case__ )
pt_model.load_state_dict(snake_case__ )
# re-transform missing_keys to list
A : List[Any] = list(snake_case__ )
if len(snake_case__ ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(snake_case__ ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
''' use it for predictions and inference.''' )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
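# Usage note: these helpers are normally invoked indirectly rather than called by hand.
# A minimal sketch, assuming a standard Transformers checkpoint layout (the model class and
# path below are illustrative only):
#   from transformers import BertModel
#   pt_model = BertModel.from_pretrained("path/to/flax_checkpoint", from_flax=True)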
| 3
| 0
|
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
lowerCAmelCase_ = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def snake_case( __magic_name__ , __magic_name__=False ) -> Optional[int]:
'''simple docstring'''
lowercase , lowercase : Optional[int] = create_model(
'''HTSAT-tiny''' , '''roberta''' , UpperCamelCase__ , precision='''fp32''' , device='''cuda:0''' if torch.cuda.is_available() else '''cpu''' , enable_fusion=UpperCamelCase__ , fusion_type='''aff_2d''' if enable_fusion else None , )
return model, model_cfg
def snake_case( __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Tuple = {}
lowercase : Dict = r'''.*sequential.(\d+).*'''
lowercase : List[str] = r'''.*_projection.(\d+).*'''
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
lowercase : Dict = key.replace(UpperCamelCase__ , UpperCamelCase__ )
if re.match(UpperCamelCase__ , UpperCamelCase__ ):
# replace sequential layers with list
lowercase : List[Any] = re.match(UpperCamelCase__ , UpperCamelCase__ ).group(1 )
lowercase : int = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(UpperCamelCase__ )//3}.linear.""" )
elif re.match(UpperCamelCase__ , UpperCamelCase__ ):
lowercase : Optional[Any] = int(re.match(UpperCamelCase__ , UpperCamelCase__ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
lowercase : Tuple = 1 if projecton_layer == 0 else 2
lowercase : int = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" )
if "audio" and "qkv" in key:
# split qkv into query key and value
lowercase : Optional[Any] = value
lowercase : int = mixed_qkv.size(0 ) // 3
lowercase : Any = mixed_qkv[:qkv_dim]
lowercase : Union[str, Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
lowercase : List[Any] = mixed_qkv[qkv_dim * 2 :]
lowercase : Dict = query_layer
lowercase : List[Any] = key_layer
lowercase : int = value_layer
else:
lowercase : List[str] = value
return model_state_dict
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ) -> Optional[int]:
'''simple docstring'''
lowercase , lowercase : Any = init_clap(UpperCamelCase__ , enable_fusion=UpperCamelCase__ )
clap_model.eval()
lowercase : List[str] = clap_model.state_dict()
lowercase : Dict = rename_state_dict(UpperCamelCase__ )
lowercase : List[Any] = ClapConfig()
lowercase : str = enable_fusion
lowercase : Tuple = ClapModel(UpperCamelCase__ )
# ignore the spectrogram embedding layer
model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
transformers_config.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
lowerCAmelCase_ = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
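# A hypothetical command-line invocation of this conversion script (the script name and paths
# are placeholders, not from the source; the flags mirror the argparse definitions above):
#   python convert_clap_original_pytorch_to_hf.py \
#     --checkpoint_path ./clap_checkpoint.pt \
#     --pytorch_dump_folder_path ./clap-hf \
#     --enable_fusion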
| 367
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'sentencepiece.bpe.model'}
lowerCAmelCase_ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
lowerCAmelCase_ = {
'moussaKam/mbarthez': 10_24,
'moussaKam/barthez': 10_24,
'moussaKam/barthez-orangesum-title': 10_24,
}
lowerCAmelCase_ = '▁'
class _A ( _lowerCamelCase ):
_UpperCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_UpperCamelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
def __init__( self : Any , _A : Optional[int] , _A : List[str]="<s>" , _A : Tuple="</s>" , _A : Dict="</s>" , _A : Dict="<s>" , _A : List[str]="<unk>" , _A : str="<pad>" , _A : Any="<mask>" , _A : Optional[Dict[str, Any]] = None , **_A : Union[str, Any] , ) -> None:
"""simple docstring"""
lowercase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
lowercase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
lowercase : Any = vocab_file
lowercase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_A ) )
lowercase : Optional[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowercase : Tuple = len(self.sp_model ) - 1
lowercase : Any = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __a ( self : Tuple , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Any = [self.cls_token_id]
lowercase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __a ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __a ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowercase : List[str] = [self.sep_token_id]
lowercase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __a ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return len(self.sp_model )
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
lowercase : int = {self.convert_ids_to_tokens(_A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __a ( self : Union[str, Any] , _A : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_A , out_type=_A )
def __a ( self : Optional[int] , _A : str ) -> Dict:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : Optional[int] = self.sp_model.PieceToId(_A )
return spm_id if spm_id else self.unk_token_id
def __a ( self : Any , _A : List[str] ) -> List[str]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_A )
def __a ( self : Any , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Dict = []
lowercase : Any = ''''''
lowercase : str = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_A ) + token
lowercase : int = True
lowercase : Optional[Any] = []
else:
current_sub_tokens.append(_A )
lowercase : List[str] = False
out_string += self.sp_model.decode(_A )
return out_string.strip()
def __getstate__( self : int ) -> Optional[Any]:
"""simple docstring"""
lowercase : str = self.__dict__.copy()
lowercase : Any = None
return state
def __setstate__( self : Optional[int] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : Union[str, Any] = {}
lowercase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __a ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_A ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase : Any = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _A )
elif not os.path.isfile(self.vocab_file ):
with open(_A , '''wb''' ) as fi:
lowercase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(_A )
return (out_vocab_file,)
| 116
| 0
|
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime ( arrival_time : list[int] ,burst_time : list[int] ,no_of_processes : int ) ->list[int]:
    '''Calculate the waiting time of each process using the shortest-remaining-time-first policy.'''
    remaining_time : list[int] = [0] * no_of_processes
    waiting_time : list[int] = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete : int = 0
    increment_time : int = 0
    minm : int = 999999999
    short : int = 0
    check : bool = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time : list[int] ,no_of_processes : int ,waiting_time : list[int] ) ->list[int]:
    '''Calculate the turnaround time of each process.'''
    turn_around_time : list[int] = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times ( waiting_time : list[int] ,turn_around_time : list[int] ,no_of_processes : int ) ->None:
    '''Print the average waiting time and average turnaround time.'''
    total_waiting_time : int = 0
    total_turn_around_time : int = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F"""Average waiting time = {total_waiting_time / no_of_processes:.5f}""" )
    print('''Average turn around time =''' ,total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
a_ = int(input())
a_ = [0] * no_of_processes
a_ = [0] * no_of_processes
a_ = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
a_ , a_ = map(int, input().split())
a_ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
a_ = burst_time
a_ = no_of_processes
a_ = waiting_time
a_ = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
a_ = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
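# A non-interactive usage sketch of the helpers above (the arrival/burst values and process
# count are hypothetical, chosen only to illustrate the call signatures used in the block above):
#   wt = calculate_waitingtime([0, 1, 2], [4, 2, 1], 3)
#   tat = calculate_turnaroundtime([4, 2, 1], 3, wt)
#   calculate_average_times(wt, tat, 3)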
| 179
|
"""simple docstring"""
def matching_min_vertex_cover ( graph : dict ) ->set:
    '''Return a vertex cover built by repeatedly taking both endpoints of an arbitrary remaining edge.'''
    chosen_vertices : set = set()
    # edges = list of graph's edges
    edges : set = get_edges(graph )
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node and to_node
    while edges:
        from_node , to_node = edges.pop()
        chosen_vertices.add(from_node )
        chosen_vertices.add(to_node )
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge )
    return chosen_vertices
def get_edges ( graph : dict ) ->set:
    '''Return the set of (from_node, to_node) edges of the graph.'''
    edges : set = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node) )
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 179
| 1
|
from datasets.utils.patching import _PatchedModuleObj, patch_submodule
from . import _test_patching
def A ( ):
import os as original_os
from os import path as original_path
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
SCREAMING_SNAKE_CASE : Optional[Any] = '''__test_patch_submodule_mock__'''
with patch_submodule(_test_patching , '''os.path.join''' , _lowercase ):
# Every way to access os.path.join must be patched, and the rest must stay untouched
# check os.path.join
assert isinstance(_test_patching.os , _PatchedModuleObj )
assert isinstance(_test_patching.os.path , _PatchedModuleObj )
assert _test_patching.os.path.join is mock
# check path.join
assert isinstance(_test_patching.path , _PatchedModuleObj )
assert _test_patching.path.join is mock
# check join
assert _test_patching.join is mock
# check that the other attributes are untouched
assert _test_patching.os.rename is original_rename
assert _test_patching.path.dirname is original_dirname
assert _test_patching.os.path.dirname is original_dirname
# Even renamed modules or objects must be patched
# check renamed_os.path.join
assert isinstance(_test_patching.renamed_os , _PatchedModuleObj )
assert isinstance(_test_patching.renamed_os.path , _PatchedModuleObj )
assert _test_patching.renamed_os.path.join is mock
# check renamed_path.join
assert isinstance(_test_patching.renamed_path , _PatchedModuleObj )
assert _test_patching.renamed_path.join is mock
# check renamed_join
assert _test_patching.renamed_join is mock
# check that the other attributes are untouched
assert _test_patching.renamed_os.rename is original_rename
assert _test_patching.renamed_path.dirname is original_dirname
assert _test_patching.renamed_os.path.dirname is original_dirname
    # check that everything is back to normal when the patch is over
assert _test_patching.os is original_os
assert _test_patching.path is original_path
assert _test_patching.join is original_join
assert _test_patching.renamed_os is original_os
assert _test_patching.renamed_path is original_path
assert _test_patching.renamed_join is original_join
def A ( ):
assert _test_patching.open is open
SCREAMING_SNAKE_CASE : Tuple = '''__test_patch_submodule_builtin_mock__'''
# _test_patching has "open" in its globals
assert _test_patching.open is open
with patch_submodule(_test_patching , '''open''' , _lowercase ):
assert _test_patching.open is mock
    # check that everything is back to normal when the patch is over
assert _test_patching.open is open
def A ( ):
# pandas.read_csv is not present in _test_patching
SCREAMING_SNAKE_CASE : Any = '''__test_patch_submodule_missing_mock__'''
with patch_submodule(_test_patching , '''pandas.read_csv''' , _lowercase ):
pass
def A ( ):
    # builtins should always be mocked even if they're not in the globals
# in case they're loaded at one point
SCREAMING_SNAKE_CASE : Union[str, Any] = '''__test_patch_submodule_missing_builtin_mock__'''
# _test_patching doesn't have "len" in its globals
assert getattr(_test_patching , '''len''' , _lowercase ) is None
with patch_submodule(_test_patching , '''len''' , _lowercase ):
assert _test_patching.len is mock
assert _test_patching.len is len
def A ( ):
SCREAMING_SNAKE_CASE : Optional[Any] = '''__test_patch_submodule_start_and_stop_mock__'''
SCREAMING_SNAKE_CASE : int = patch_submodule(_test_patching , '''open''' , _lowercase )
assert _test_patching.open is open
patch.start()
assert _test_patching.open is mock
patch.stop()
assert _test_patching.open is open
def A ( ):
from os import rename as original_rename
from os.path import dirname as original_dirname
from os.path import join as original_join
SCREAMING_SNAKE_CASE : Optional[Any] = '''__test_patch_submodule_successive_join__'''
SCREAMING_SNAKE_CASE : Any = '''__test_patch_submodule_successive_dirname__'''
SCREAMING_SNAKE_CASE : Optional[int] = '''__test_patch_submodule_successive_rename__'''
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
with patch_submodule(_test_patching , '''os.path.join''' , _lowercase ):
with patch_submodule(_test_patching , '''os.rename''' , _lowercase ):
with patch_submodule(_test_patching , '''os.path.dirname''' , _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
# try another order
with patch_submodule(_test_patching , '''os.rename''' , _lowercase ):
with patch_submodule(_test_patching , '''os.path.join''' , _lowercase ):
with patch_submodule(_test_patching , '''os.path.dirname''' , _lowercase ):
assert _test_patching.os.path.join is mock_join
assert _test_patching.os.path.dirname is mock_dirname
assert _test_patching.os.rename is mock_rename
assert _test_patching.os.path.join is original_join
assert _test_patching.os.path.dirname is original_dirname
assert _test_patching.os.rename is original_rename
def A ( ):
SCREAMING_SNAKE_CASE : int = '''__test_patch_submodule_doesnt_exist_mock__'''
with patch_submodule(_test_patching , '''__module_that_doesn_exist__.__attribute_that_doesn_exist__''' , _lowercase ):
pass
with patch_submodule(_test_patching , '''os.__attribute_that_doesn_exist__''' , _lowercase ):
pass
| 258
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( UpperCamelCase_ , unittest.TestCase):
UpperCamelCase_ = KandinskyInpaintPipeline
UpperCamelCase_ = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
UpperCamelCase_ = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
UpperCamelCase_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase_ = False
@property
def __A ( self : Tuple ):
'''simple docstring'''
return 32
@property
def __A ( self : List[str] ):
'''simple docstring'''
return 32
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def __A ( self : List[Any] ):
'''simple docstring'''
return 100
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __A ( self : int ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
SCREAMING_SNAKE_CASE : Any = MultilingualCLIP(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : int = {
'''in_channels''': 9,
            # out_channels is double in_channels because the model predicts both the mean and the variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE : str = UNetaDConditionModel(**UpperCamelCase__ )
return model
@property
def __A ( self : int ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self : str ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE : Dict = self.dummy_tokenizer
SCREAMING_SNAKE_CASE : List[str] = self.dummy_unet
SCREAMING_SNAKE_CASE : int = self.dummy_movq
SCREAMING_SNAKE_CASE : List[str] = DDIMScheduler(
num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=UpperCamelCase__ , set_alpha_to_one=UpperCamelCase__ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Any = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __A ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=0 ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase__ )
# create init_image
SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE : Optional[int] = Image.fromarray(np.uinta(UpperCamelCase__ ) ).convert('''RGB''' ).resize((256, 256) )
# create mask
SCREAMING_SNAKE_CASE : Tuple = np.ones((64, 64) , dtype=np.floataa )
SCREAMING_SNAKE_CASE : List[Any] = 0
if str(UpperCamelCase__ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE : Dict = torch.manual_seed(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : Dict = torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = '''cpu'''
SCREAMING_SNAKE_CASE : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE : str = self.pipeline_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = output.images
SCREAMING_SNAKE_CASE : Any = pipe(
**self.get_dummy_inputs(UpperCamelCase__ ) , return_dict=UpperCamelCase__ , )[0]
SCREAMING_SNAKE_CASE : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE : int = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __A ( self : str ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase):
def __A ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Optional[int] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
SCREAMING_SNAKE_CASE : Optional[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE : int = np.ones((768, 768) , dtype=np.floataa )
SCREAMING_SNAKE_CASE : Optional[int] = 0
SCREAMING_SNAKE_CASE : Optional[Any] = '''a hat'''
SCREAMING_SNAKE_CASE : Dict = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.to(UpperCamelCase__ )
pipeline.set_progress_bar_config(disable=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = pipe_prior(
UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(
UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , image_embeds=UpperCamelCase__ , negative_image_embeds=UpperCamelCase__ , generator=UpperCamelCase__ , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
SCREAMING_SNAKE_CASE : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase__ , UpperCamelCase__ )
| 258
| 1
|
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class A ( UpperCAmelCase_ ):
__UpperCAmelCase : Optional[Any] = 'informer'
__UpperCAmelCase : List[Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__(self : Union[str, Any] , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : str = "student_t" , __UpperCAmelCase : str = "nll" , __UpperCAmelCase : int = 1 , __UpperCAmelCase : List[int] = None , __UpperCAmelCase : Optional[Union[str, bool]] = "mean" , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : int = 0 , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : int = 6_4 , __UpperCAmelCase : int = 3_2 , __UpperCAmelCase : int = 3_2 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : int = 2 , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "gelu" , __UpperCAmelCase : float = 0.05 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : float = 0.1 , __UpperCAmelCase : int = 1_0_0 , __UpperCAmelCase : float = 0.02 , __UpperCAmelCase : Any=True , __UpperCAmelCase : str = "prob" , __UpperCAmelCase : int = 5 , __UpperCAmelCase : bool = True , **__UpperCAmelCase : Optional[Any] , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = prediction_length
UpperCAmelCase__ = context_length or prediction_length
UpperCAmelCase__ = distribution_output
UpperCAmelCase__ = loss
UpperCAmelCase__ = input_size
UpperCAmelCase__ = num_time_features
UpperCAmelCase__ = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase__ = scaling
UpperCAmelCase__ = num_dynamic_real_features
UpperCAmelCase__ = num_static_real_features
UpperCAmelCase__ = num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase__ = cardinality
else:
UpperCAmelCase__ = [0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(__UpperCAmelCase ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
UpperCAmelCase__ = embedding_dimension
else:
UpperCAmelCase__ = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase__ = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase__ = input_size * len(self.lags_sequence ) + self._number_of_features
UpperCAmelCase__ = d_model
UpperCAmelCase__ = encoder_attention_heads
UpperCAmelCase__ = decoder_attention_heads
UpperCAmelCase__ = encoder_ffn_dim
UpperCAmelCase__ = decoder_ffn_dim
UpperCAmelCase__ = encoder_layers
UpperCAmelCase__ = decoder_layers
UpperCAmelCase__ = dropout
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = activation_dropout
UpperCAmelCase__ = encoder_layerdrop
UpperCAmelCase__ = decoder_layerdrop
UpperCAmelCase__ = activation_function
UpperCAmelCase__ = init_std
UpperCAmelCase__ = use_cache
# Informer
UpperCAmelCase__ = attention_type
UpperCAmelCase__ = sampling_factor
UpperCAmelCase__ = distil
super().__init__(is_encoder_decoder=__UpperCAmelCase , **__UpperCAmelCase )
@property
def lowercase_ (self : Union[str, Any] ) -> int:
"""simple docstring"""
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 65
|
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)
    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = (1 - µA(x))
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = (µA(x) * µB(x))
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, (µA(x) + µB(x))]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, (µA(x) - µB(x))]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
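    # Minimal sketch (not part of the original script): how the two compositions
    # named above could be computed with NumPy for two small, hypothetical fuzzy
    # relations R and S, combining over the middle index j.
    R = np.array([[0.2, 0.7], [0.5, 1.0]])
    S = np.array([[0.9, 0.3], [0.4, 0.8]])
    # max-min composition: (R o S)[i, k] = max_j min(R[i, j], S[j, k])
    max_min_composition = np.max(np.minimum(R[:, :, None], S[None, :, :]), axis=1)
    # max-product composition: (R o S)[i, k] = max_j (R[i, j] * S[j, k])
    max_product_composition = np.max(R[:, :, None] * S[None, :, :], axis=1)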
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('Young')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('Middle aged')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('union')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('intersection')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('complement_a')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('difference a/b')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('alg_sum')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('alg_product')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('bdd_sum')
plt.grid(True)
plt.subplot(4, 3, 1_0)
plt.plot(X, bdd_difference)
plt.title('bdd_difference')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 65
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__A = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class lowercase ( snake_case__):
"""simple docstring"""
def __init__( self : int , __UpperCAmelCase : pyspark.sql.DataFrame , __UpperCAmelCase : Optional[NamedSplit] = None , __UpperCAmelCase : Optional[Features] = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = False , __UpperCAmelCase : str = None , __UpperCAmelCase : bool = True , __UpperCAmelCase : str = "arrow" , **__UpperCAmelCase : str , ) -> Dict:
super().__init__(
split=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase , streaming=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCAmelCase_= load_from_cache_file
UpperCAmelCase_= file_format
UpperCAmelCase_= Spark(
df=__UpperCAmelCase , features=__UpperCAmelCase , cache_dir=__UpperCAmelCase , working_dir=__UpperCAmelCase , **__UpperCAmelCase , )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
UpperCAmelCase_= None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=__UpperCAmelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 277
| 0
|
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """simple docstring"""
    factor = (259 * (level + 255)) / (255 * (259 - level))
    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))
    return img.point(contrast)
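# Illustrative numbers (not in the original): for level = 170 the factor above is
# 259 * (170 + 255) / (255 * (259 - 170)) = 110075 / 22695 ≈ 4.85, so mid-grey
# pixels (value 128) are left unchanged while values away from 128 are pushed
# further towards the extremes.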
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 53
|
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
__A : Tuple = logging.get_logger(__name__)
__A : List[Any] = R'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class __A ( lowerCAmelCase ):
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : str ):
raise NotImplementedError('StoppingCriteria needs to be subclassed' )
class __A ( lowerCAmelCase ):
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = None ):
lowerCAmelCase : str = max_length
lowerCAmelCase : Any = max_position_embeddings
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Any , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : Optional[Any] ):
lowerCAmelCase : List[Any] = input_ids.shape[-1]
lowerCAmelCase : List[Any] = cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'This is a friendly reminder - the current text generation call will exceed the model\'s predefined '
f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
'exceptions, performance degradation, or nothing at all.' )
return is_done
class __A ( lowerCAmelCase ):
def __init__( self : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
warnings.warn(
'The class `MaxNewTokensCriteria` is deprecated. '
f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
'with `max_length = start_length + max_new_tokens` instead.' , UpperCAmelCase_ , )
lowerCAmelCase : Optional[Any] = start_length
lowerCAmelCase : List[Any] = max_new_tokens
lowerCAmelCase : Union[str, Any] = start_length + max_new_tokens
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Dict , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : Optional[int] ):
return input_ids.shape[-1] >= self.max_length
class __A ( lowerCAmelCase ):
def __init__( self : List[Any] , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[float] = None ):
lowerCAmelCase : List[str] = max_time
lowerCAmelCase : Optional[Any] = time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : List[Any] , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : List[Any] ):
return time.time() - self.initial_timestamp > self.max_time
class __A ( lowerCAmelCase ):
@add_start_docstrings(UpperCAmelCase_ )
def __call__( self : Optional[Any] , UpperCAmelCase_ : torch.LongTensor , UpperCAmelCase_ : torch.FloatTensor , **UpperCAmelCase_ : Optional[Any] ):
return any(criteria(UpperCAmelCase_ , UpperCAmelCase_ ) for criteria in self )
@property
def lowercase__ ( self : Tuple ):
for stopping_criterium in self:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return stopping_criterium.max_length
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
return stopping_criterium.max_length
return None
def SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase, _UpperCAmelCase ) -> StoppingCriteriaList:
'''simple docstring'''
lowerCAmelCase : Dict = stopping_criteria.max_length
lowerCAmelCase : Dict = deepcopy(_UpperCAmelCase )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('You set different `max_length` for stopping criteria and `max_length` parameter', _UpperCAmelCase )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=_UpperCAmelCase ) )
return new_stopping_criteria
| 138
| 0
|
def fibonacci(n: int) -> int:
    '''simple docstring'''
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]
def fibonacci_digits_index(n: int) -> int:
    '''simple docstring'''
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index
def solution(n: int = 1000) -> int:
    '''simple docstring'''
    return fibonacci_digits_index(n)
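# Sanity check (from the Project Euler 25 statement): the first Fibonacci term
# with three digits is F(12) = 144, so fibonacci_digits_index(3) should return 12;
# solution(1000) returns the index of the first term with 1000 digits.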
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 363
|
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
lowerCamelCase : str = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def _SCREAMING_SNAKE_CASE ( lowercase : Any , lowercase : str , lowercase : int , lowercase : List[Any] ):
'''simple docstring'''
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(f"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
lowerCamelCase_ = TOKENIZER_CLASSES
else:
lowerCamelCase_ = {tokenizer_name: getattr(lowercase , tokenizer_name + 'Fast' )}
logger.info(f"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
lowerCamelCase_ = TOKENIZER_CLASSES[tokenizer_name]
lowerCamelCase_ = True
if checkpoint_name is None:
lowerCamelCase_ = list(tokenizer_class.max_model_input_sizes.keys() )
else:
lowerCamelCase_ = [checkpoint_name]
logger.info(f"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(f"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
lowerCamelCase_ = tokenizer_class.from_pretrained(lowercase , force_download=lowercase )
# Save fast tokenizer
logger.info(f"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
lowerCamelCase_ , lowerCamelCase_ = checkpoint.split('/' )
lowerCamelCase_ = os.path.join(lowercase , lowercase )
elif add_prefix:
lowerCamelCase_ = checkpoint
lowerCamelCase_ = dump_path
else:
lowerCamelCase_ = None
lowerCamelCase_ = dump_path
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
lowerCamelCase_ = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
lowerCamelCase_ = file_path.split(lowercase )[-1][0]
if next_char == "/":
lowerCamelCase_ = os.path.join(lowercase , lowercase )
lowerCamelCase_ = None
logger.info(f"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
lowerCamelCase_ = tokenizer.save_pretrained(
lowercase , legacy_format=lowercase , filename_prefix=lowercase )
logger.info(f"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(lowercase )
logger.info(f"""=> removing {file_name}""" )
if __name__ == "__main__":
lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 208
| 0
|
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    '''simple docstring'''
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
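# Hypothetical invocation (the script name is assumed), exposed by fire.Fire below:
#   python minify.py path/to/src path/to/dest 5
# copies the first 5 (rstripped) lines of every file in src into dest.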
if __name__ == "__main__":
fire.Fire(minify)
| 36
|
def add(first: int, second: int) -> int:
    """simple docstring"""
    while second != 0:
        # the carry holds the bit positions where both operands are 1
        carry = first & second
        # XOR adds the two numbers while ignoring the carry
        first ^= second
        # shift the carry left so it is added in at the next iteration
        second = carry << 1
    return first
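# Worked example: add(5, 3)
#   carry = 5 & 3 = 1, first = 5 ^ 3 = 6, second = 1 << 1 = 2
#   carry = 6 & 2 = 2, first = 6 ^ 2 = 4, second = 2 << 1 = 4
#   carry = 4 & 4 = 4, first = 4 ^ 4 = 0, second = 4 << 1 = 8
#   carry = 0 & 8 = 0, first = 0 ^ 8 = 8, second = 0  -> returns 8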
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE_:int = int(input("""Enter the first number: """).strip())
SCREAMING_SNAKE_CASE_:Optional[int] = int(input("""Enter the second number: """).strip())
print(F"""{add(first, second) = }""")
| 116
| 0
|
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
__a :Union[str, Any] = logging.getLogger(__name__)
def __snake_case ( ):
"""simple docstring"""
A_ = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" ,type=__UpperCamelCase ,default="wikitext" ,help="Name of the training. Explore datasets at: hf.co/datasets." ,)
parser.add_argument(
"--dataset_config" ,type=__UpperCamelCase ,default="wikitext-103-raw-v1" ,help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" ,type=__UpperCamelCase ,default="sayakpaul/unigram-tokenizer-wikitext" ,help="Tokenizer identifier. Can be a local filepath or a Hub identifier." ,)
parser.add_argument(
"--shard_size" ,type=__UpperCamelCase ,default=1000 ,help="Number of entries to go in a single shard." ,)
parser.add_argument("--split" ,type=__UpperCamelCase ,default="train" ,choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" ,default=__UpperCamelCase ,type=__UpperCamelCase ,help="Limit the number of shards (used for debugging)." ,)
parser.add_argument(
"--max_length" ,type=__UpperCamelCase ,default=512 ,help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." ,)
parser.add_argument(
"--output_dir" ,default="tf-tpu" ,type=__UpperCamelCase ,help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." ,)
A_ = parser.parse_args()
return args
def __snake_case ( __UpperCamelCase : Union[str, Any] ):
"""simple docstring"""
def fn(__UpperCamelCase : Union[str, Any] ):
return tokenizer(examples["text"] )
return fn
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
A_ = []
for i in range(len(tokenized_data["input_ids"] ) ):
A_ = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
A_ = tf.train.Features(feature=__UpperCamelCase )
A_ = tf.train.Example(features=__UpperCamelCase )
A_ = example.SerializeToString()
records.append(__UpperCamelCase )
return records
def __snake_case ( __UpperCamelCase : Optional[int] ):
"""simple docstring"""
A_ = datasets.load_dataset(args.dataset_name ,args.dataset_config ,split=args.split )
if args.limit is not None:
A_ = min(len(__UpperCamelCase ) ,args.limit )
A_ = dataset.select(range(__UpperCamelCase ) )
print(f'''Limiting the dataset to {args.limit} entries.''' )
A_ = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
A_ = os.path.join(args.output_dir ,args.split )
if not os.path.exists(__UpperCamelCase ):
os.makedirs(__UpperCamelCase )
else:
A_ = os.path.join(args.output_dir ,args.split )
# Tokenize the whole dataset at once.
A_ = tokenize_function(__UpperCamelCase )
A_ = dataset.map(__UpperCamelCase ,batched=__UpperCamelCase ,num_proc=4 ,remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(__UpperCamelCase : Any ):
# Concatenate all texts.
A_ = {k: sum(examples[k] ,[] ) for k in examples.keys()}
A_ = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
A_ = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
A_ = {
k: [t[i : i + args.max_length] for i in range(0 ,__UpperCamelCase ,args.max_length )]
for k, t in concatenated_examples.items()
}
return result
A_ = dataset_tokenized.map(__UpperCamelCase ,batched=__UpperCamelCase ,batch_size=1000 ,num_proc=4 )
A_ = 0
A_ = 0
for shard in range(0 ,len(__UpperCamelCase ) ,args.shard_size ):
A_ = grouped_dataset[shard : shard + args.shard_size]
A_ = len(dataset_snapshot["input_ids"] )
A_ = os.path.join(__UpperCamelCase ,f'''dataset-{shard_count}-{records_containing}.tfrecord''' )
A_ = get_serialized_examples(__UpperCamelCase )
with tf.io.TFRecordWriter(__UpperCamelCase ) as out_file:
for i in range(len(__UpperCamelCase ) ):
A_ = serialized_examples[i]
out_file.write(__UpperCamelCase )
print("Wrote file {} containing {} records".format(__UpperCamelCase ,__UpperCamelCase ) )
shard_count += 1
total_records += records_containing
with open(f'''split-{args.split}-records-count.txt''' ,"w" ) as f:
print(f'''Total {args.split} records: {total_records}''' ,file=__UpperCamelCase )
if __name__ == "__main__":
__a :List[Any] = parse_args()
main(args)
| 329
|
import itertools
import math
def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def prime_generator():
    """simple docstring"""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1
def solution(nth: int = 10001) -> int:
    """simple docstring"""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
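# Example (from the Project Euler 7 statement): the sixth prime is 13, so
# solution(6) should return 13; the default solution() yields the 10001st prime.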
if __name__ == "__main__":
print(F"{solution() = }")
| 329
| 1
|
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """simple docstring"""
    pat = re.compile(r"""^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$""")
    if match := re.search(pat, phone):
        return match.string == phone
    return False
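# Illustrative expectations for the pattern above (not exhaustive):
#   indian_phone_validator("+918827897895") -> True  (optional +91 prefix, then ten digits)
#   indian_phone_validator("+91 123456789") -> False (only nine digits, and the first is not 7/8/9)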
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 258
|
'''simple docstring'''
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'tensor(bool)': np.bool_,
'tensor(int8)': np.inta,
'tensor(uint8)': np.uinta,
'tensor(int16)': np.intaa,
'tensor(uint16)': np.uintaa,
'tensor(int32)': np.intaa,
'tensor(uint32)': np.uintaa,
'tensor(int64)': np.intaa,
'tensor(uint64)': np.uintaa,
'tensor(float16)': np.floataa,
'tensor(float)': np.floataa,
'tensor(double)': np.floataa,
}
class __UpperCAmelCase :
'''simple docstring'''
def __init__(self : Union[str, Any] , _lowerCAmelCase : Optional[int]=None , **_lowerCAmelCase : Union[str, Any] ):
logger.info("""`diffusers.OnnxRuntimeModel` is experimental and might change in the future.""" )
A = model
A = kwargs.get("""model_save_dir""" , _lowerCAmelCase )
A = kwargs.get("""latest_model_name""" , _lowerCAmelCase )
def __call__(self : Tuple , **_lowerCAmelCase : Optional[Any] ):
A = {k: np.array(_lowerCAmelCase ) for k, v in kwargs.items()}
return self.model.run(_lowerCAmelCase , _lowerCAmelCase )
@staticmethod
def A (_lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Optional[Any]=None ):
if provider is None:
logger.info("""No onnxruntime provider specified, using CPUExecutionProvider""" )
A = """CPUExecutionProvider"""
return ort.InferenceSession(_lowerCAmelCase , providers=[provider] , sess_options=_lowerCAmelCase )
def A (self : List[str] , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : List[str] ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
A = self.model_save_dir.joinpath(self.latest_model_name )
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
A = self.model_save_dir.joinpath(_lowerCAmelCase )
if src_path.exists():
A = Path(_lowerCAmelCase ).joinpath(_lowerCAmelCase )
try:
shutil.copyfile(_lowerCAmelCase , _lowerCAmelCase )
except shutil.SameFileError:
pass
def A (self : Tuple , _lowerCAmelCase : Union[str, os.PathLike] , **_lowerCAmelCase : str , ):
if os.path.isfile(_lowerCAmelCase ):
logger.error(F"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase )
# saving model weights/files
self._save_pretrained(_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : Optional[Union[bool, str, None]] = None , _lowerCAmelCase : Optional[Union[str, None]] = None , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional["ort.SessionOptions"] = None , **_lowerCAmelCase : Optional[int] , ):
A = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(_lowerCAmelCase ):
A = OnnxRuntimeModel.load_model(
os.path.join(_lowerCAmelCase , _lowerCAmelCase ) , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
A = Path(_lowerCAmelCase )
# load model from hub
else:
# download model
A = hf_hub_download(
repo_id=_lowerCAmelCase , filename=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , )
A = Path(_lowerCAmelCase ).parent
A = Path(_lowerCAmelCase ).name
A = OnnxRuntimeModel.load_model(_lowerCAmelCase , provider=_lowerCAmelCase , sess_options=_lowerCAmelCase )
return cls(model=_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def A (cls : str , _lowerCAmelCase : Union[str, Path] , _lowerCAmelCase : bool = True , _lowerCAmelCase : Optional[str] = None , _lowerCAmelCase : Optional[str] = None , **_lowerCAmelCase : str , ):
A = None
if len(str(_lowerCAmelCase ).split("""@""" ) ) == 2:
A , A = model_id.split("""@""" )
return cls._from_pretrained(
model_id=_lowerCAmelCase , revision=_lowerCAmelCase , cache_dir=_lowerCAmelCase , force_download=_lowerCAmelCase , use_auth_token=_lowerCAmelCase , **_lowerCAmelCase , )
| 258
| 1
|
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = "mvp"
lowerCAmelCase__ = ["past_key_values"]
lowerCAmelCase__ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self , UpperCAmelCase=50267 , UpperCAmelCase=1024 , UpperCAmelCase=12 , UpperCAmelCase=4096 , UpperCAmelCase=16 , UpperCAmelCase=12 , UpperCAmelCase=4096 , UpperCAmelCase=16 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase="gelu" , UpperCAmelCase=1024 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.02 , UpperCAmelCase=0.0 , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=1 , UpperCAmelCase=0 , UpperCAmelCase=2 , UpperCAmelCase=True , UpperCAmelCase=2 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=100 , UpperCAmelCase=800 , **UpperCAmelCase , ) -> Dict:
'''simple docstring'''
lowercase_ = vocab_size
lowercase_ = max_position_embeddings
lowercase_ = d_model
lowercase_ = encoder_ffn_dim
lowercase_ = encoder_layers
lowercase_ = encoder_attention_heads
lowercase_ = decoder_ffn_dim
lowercase_ = decoder_layers
lowercase_ = decoder_attention_heads
lowercase_ = dropout
lowercase_ = attention_dropout
lowercase_ = activation_dropout
lowercase_ = activation_function
lowercase_ = init_std
lowercase_ = encoder_layerdrop
lowercase_ = decoder_layerdrop
lowercase_ = classifier_dropout
lowercase_ = use_cache
lowercase_ = encoder_layers
lowercase_ = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ = use_prompt
lowercase_ = prompt_length
lowercase_ = prompt_mid_dim
super().__init__(
pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , forced_eos_token_id=UpperCAmelCase , **UpperCAmelCase , )
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , UpperCAmelCase ):
lowercase_ = self.bos_token_id
warnings.warn(
F'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"The config can simply be saved and uploaded again to be fixed." )
| 355
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=0 ) -> Optional[int]:
'''simple docstring'''
lowercase_ = 1.0 if scale is None else scale
lowercase_ = 0.0 if loc is None else loc
super().__init__(UpperCAmelCase , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=UpperCAmelCase )] )
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.base_dist.mean * self.scale + self.loc
@property
def A__ ( self ) -> str:
'''simple docstring'''
return self.base_dist.variance * self.scale**2
@property
def A__ ( self ) -> List[str]:
'''simple docstring'''
return self.variance.sqrt()
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase_ = args_dim
lowercase_ = nn.ModuleList([nn.Linear(UpperCAmelCase , UpperCAmelCase ) for dim in args_dim.values()] )
lowercase_ = domain_map
def A__ ( self , UpperCAmelCase ) -> Tuple[torch.Tensor]:
'''simple docstring'''
lowercase_ = [proj(UpperCAmelCase ) for proj in self.proj]
return self.domain_map(*UpperCAmelCase )
class __lowerCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self , UpperCAmelCase ) -> Dict:
'''simple docstring'''
super().__init__()
lowercase_ = function
def A__ ( self , UpperCAmelCase , *UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
return self.function(UpperCAmelCase , *UpperCAmelCase )
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
def __init__( self , UpperCAmelCase = 1 ) -> None:
'''simple docstring'''
lowercase_ = dim
lowercase_ = {k: dim * self.args_dim[k] for k in self.args_dim}
def A__ ( self , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
if self.dim == 1:
return self.distribution_class(*UpperCAmelCase )
else:
return Independent(self.distribution_class(*UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Distribution:
'''simple docstring'''
lowercase_ = self._base_distribution(UpperCAmelCase )
if loc is None and scale is None:
return distr
else:
return AffineTransformed(UpperCAmelCase , loc=UpperCAmelCase , scale=UpperCAmelCase , event_dim=self.event_dim )
@property
def A__ ( self ) -> Tuple:
'''simple docstring'''
return () if self.dim == 1 else (self.dim,)
@property
def A__ ( self ) -> int:
'''simple docstring'''
return len(self.event_shape )
@property
def A__ ( self ) -> float:
'''simple docstring'''
return 0.0
def A__ ( self , UpperCAmelCase ) -> nn.Module:
'''simple docstring'''
return ParameterProjection(
in_features=UpperCAmelCase , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map ) , )
def A__ ( self , *UpperCAmelCase ) -> Any:
'''simple docstring'''
raise NotImplementedError()
@staticmethod
def A__ ( UpperCAmelCase ) -> torch.Tensor:
'''simple docstring'''
return (x + torch.sqrt(torch.square(UpperCAmelCase ) + 4.0 )) / 2.0
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"df": 1, "loc": 1, "scale": 1}
lowerCAmelCase__ = StudentT
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
lowercase_ = 2.0 + cls.squareplus(UpperCAmelCase )
return df.squeeze(-1 ), loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"loc": 1, "scale": 1}
lowerCAmelCase__ = Normal
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase ).clamp_min(torch.finfo(scale.dtype ).eps )
return loc.squeeze(-1 ), scale.squeeze(-1 )
class __lowerCamelCase ( snake_case_ ):
"""simple docstring"""
lowerCAmelCase__ = {"total_count": 1, "logits": 1}
lowerCAmelCase__ = NegativeBinomial
@classmethod
def A__ ( cls , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = cls.squareplus(UpperCAmelCase )
return total_count.squeeze(-1 ), logits.squeeze(-1 )
def A__ ( self , UpperCAmelCase ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if self.dim == 1:
return self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase )
else:
return Independent(self.distribution_class(total_count=UpperCAmelCase , logits=UpperCAmelCase ) , 1 )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = None ) -> Distribution:
'''simple docstring'''
lowercase_ , lowercase_ = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits) )
| 297
| 0
|
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
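# The search above exploits the fact that an a x b grid contains T(a) * T(b)
# sub-rectangles, where T(k) = k * (k + 1) / 2 is the k-th triangle number;
# e.g. a 2 x 3 grid contains T(2) * T(3) = 3 * 6 = 18 rectangles.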
if __name__ == "__main__":
print(f"{solution() = }")
| 322
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class snake_case__ :
"""simple docstring"""
def __init__( self : List[str], _snake_case : Any, _snake_case : int=1_3, _snake_case : Optional[int]=7, _snake_case : int=True, _snake_case : Optional[Any]=True, _snake_case : Optional[Any]=True, _snake_case : Union[str, Any]=9_9, _snake_case : Optional[Any]=3_2, _snake_case : Tuple=5, _snake_case : str=4, _snake_case : Any=3_7, _snake_case : int="gelu", _snake_case : Optional[Any]=0.1, _snake_case : str=0.1, _snake_case : str=5_1_2, _snake_case : Dict=1_6, _snake_case : str=2, _snake_case : Union[str, Any]=0.0_2, _snake_case : Optional[int]=3, _snake_case : Union[str, Any]=4, _snake_case : Tuple=None, ) ->Optional[Any]:
snake_case__ : Optional[int] = parent
snake_case__ : List[Any] = batch_size
snake_case__ : Tuple = seq_length
snake_case__ : str = is_training
snake_case__ : Optional[int] = use_token_type_ids
snake_case__ : Any = use_labels
snake_case__ : Dict = vocab_size
snake_case__ : str = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[str] = num_attention_heads
snake_case__ : Union[str, Any] = intermediate_size
snake_case__ : List[Any] = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : Any = max_position_embeddings
snake_case__ : Union[str, Any] = type_vocab_size
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Optional[int] = initializer_range
snake_case__ : Optional[int] = num_labels
snake_case__ : str = num_choices
snake_case__ : int = scope
snake_case__ : List[str] = self.vocab_size - 1
def lowercase_ ( self : Union[str, Any] ) ->Tuple:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
snake_case__ : List[str] = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
snake_case__ : Tuple = None
snake_case__ : str = None
snake_case__ : List[Any] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
snake_case__ : List[str] = ids_tensor([self.batch_size], self.num_choices )
snake_case__ : Union[str, Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, pad_token_id=self.pad_token_id, )
snake_case__ : List[str] = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self : Any, _snake_case : List[str], _snake_case : Any, _snake_case : List[Any], _snake_case : Tuple, *_snake_case : Optional[Any] ) ->Tuple:
snake_case__ : Union[str, Any] = OpenAIGPTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[Any] = model(_snake_case, token_type_ids=_snake_case, head_mask=_snake_case )
snake_case__ : Union[str, Any] = model(_snake_case, token_type_ids=_snake_case )
snake_case__ : Optional[Any] = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Optional[int], _snake_case : Optional[Any], _snake_case : Union[str, Any], _snake_case : Optional[int], _snake_case : List[Any], *_snake_case : Dict ) ->Optional[int]:
snake_case__ : Optional[Any] = OpenAIGPTLMHeadModel(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Tuple = model(_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : int, _snake_case : Tuple, _snake_case : List[str], _snake_case : List[Any], _snake_case : List[Any], *_snake_case : List[Any] ) ->Optional[int]:
snake_case__ : List[str] = OpenAIGPTDoubleHeadsModel(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : Optional[Any] = model(_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.loss.shape, () )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Optional[int], _snake_case : Tuple, _snake_case : Dict, _snake_case : List[str], _snake_case : Optional[Any], *_snake_case : Union[str, Any] ) ->str:
snake_case__ : List[str] = self.num_labels
snake_case__ : Dict = OpenAIGPTForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
snake_case__ : List[str] = ids_tensor([self.batch_size], self.type_sequence_label_size )
snake_case__ : List[str] = model(_snake_case, token_type_ids=_snake_case, labels=_snake_case )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict ) ->int:
snake_case__ : List[Any] = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Optional[Any] = config_and_inputs
snake_case__ : str = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowercase_ ( self : Optional[int], _snake_case : Union[str, Any], _snake_case : int, _snake_case : Tuple, _snake_case : Tuple, _snake_case : List[str] ) ->Optional[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def lowercase_ ( self : Optional[Any], _snake_case : Union[str, Any], _snake_case : List[str], _snake_case : Any=False ) ->Tuple:
snake_case__ : Optional[int] = super()._prepare_for_class(_snake_case, _snake_case, return_labels=_snake_case )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
snake_case__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=_snake_case, )
snake_case__ : List[Any] = inputs_dict['labels']
snake_case__ : List[Any] = inputs_dict['labels']
snake_case__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=_snake_case, )
snake_case__ : Tuple = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=_snake_case )
return inputs_dict
def lowercase_ ( self : Union[str, Any] ) ->List[str]:
snake_case__ : List[str] = OpenAIGPTModelTester(self )
snake_case__ : Any = ConfigTester(self, config_class=_snake_case, n_embd=3_7 )
def lowercase_ ( self : Optional[int] ) ->str:
self.config_tester.run_common_tests()
def lowercase_ ( self : int ) ->Tuple:
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_snake_case )
def lowercase_ ( self : Tuple ) ->List[str]:
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_snake_case )
def lowercase_ ( self : Dict ) ->int:
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_snake_case )
def lowercase_ ( self : int ) ->str:
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_snake_case )
@slow
def lowercase_ ( self : Optional[Any] ) ->str:
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = OpenAIGPTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
@require_torch
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self : Tuple ) ->Optional[int]:
snake_case__ : Union[str, Any] = OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(_snake_case )
snake_case__ : Tuple = torch.tensor([[4_8_1, 4_7_3_5, 5_4_4]], dtype=torch.long, device=_snake_case ) # the president is
snake_case__ : int = [
4_8_1,
4_7_3_5,
5_4_4,
2_4_6,
9_6_3,
8_7_0,
7_6_2,
2_3_9,
2_4_4,
4_0_4_7_7,
2_4_4,
2_4_9,
7_1_9,
8_8_1,
4_8_7,
5_4_4,
2_4_0,
2_4_4,
6_0_3,
4_8_1,
] # the president is a very good man. " \n " i\'m sure he is, " said the
snake_case__ : Optional[int] = model.generate(_snake_case, do_sample=_snake_case )
self.assertListEqual(output_ids[0].tolist(), _snake_case )
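# A minimal standalone sketch of the greedy-generation check above; the "openai-gpt"
# checkpoint and the prompt come from the test, everything else is illustrative.
import torch
from transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt")
model.eval()
input_ids = tokenizer("the president is", return_tensors="pt").input_ids  # [[481, 4735, 544]]
with torch.no_grad():
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
print(tokenizer.decode(output_ids[0]))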
| 277
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class a__ ( __lowercase , unittest.TestCase ):
a : Optional[Any] = BarthezTokenizer
a : List[Any] = BarthezTokenizerFast
a : int = True
a : Union[str, Any] = True
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
super().setUp()
a = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_a )
a = tokenizer
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
a = "<pad>"
a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def lowerCAmelCase_ ( self ) -> List[str]:
'''simple docstring'''
a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(_a ) , 101122 )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
a = ["A long paragraph for summarization.", "Another paragraph for summarization."]
a = [0, 57, 3018, 70307, 91, 2]
a = self.tokenizer(
_a , max_length=len(_a ) , padding=_a , truncation=_a , return_tensors="pt" )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
a = batch.input_ids.tolist()[0]
self.assertListEqual(_a , _a )
def lowerCAmelCase_ ( self ) -> int:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a = self.get_tokenizer()
a = self.get_rust_tokenizer()
a = "I was born in 92000, and this is falsé."
a = tokenizer.tokenize(_a )
a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
a = tokenizer.encode(_a , add_special_tokens=_a )
a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
a = self.get_rust_tokenizer()
a = tokenizer.encode(_a )
a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def lowerCAmelCase_ ( self ) -> Dict:
'''simple docstring'''
a = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
a = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=_a , )
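# For reference, a minimal sketch of using the tokenizer exercised above outside the
# test harness (the checkpoint name comes from the test; the inputs are illustrative).
from transformers import BarthezTokenizerFast

tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
batch = tokenizer(
    ["A long paragraph for summarization.", "Another paragraph for summarization."],
    padding=True,
    return_tensors="pt",
)
print(batch.input_ids.shape)  # (2, padded_sequence_length)
print(tokenizer.decode(batch.input_ids[0], skip_special_tokens=True))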
| 350
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Dict = "▁"
lowercase__ : Union[str, Any] = {"vocab_file": "spiece.model"}
lowercase__ : Union[str, Any] = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
lowercase__ : Tuple = {
"google/reformer-crime-and-punishment": 524_288,
}
class a__ ( UpperCamelCase__ ):
a : List[Any] = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = ["""input_ids""", """attention_mask"""]
def __init__( self , A , A="</s>" , A="<unk>" , A=[] , A = None , **A , ) -> None:
'''simple docstring'''
a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=A , unk_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , )
a = vocab_file
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A )
@property
def lowerCAmelCase_ ( self ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.get_piece_size()
def lowerCAmelCase_ ( self ) -> Dict[str, int]:
'''simple docstring'''
a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[int]:
'''simple docstring'''
a = self.__dict__.copy()
a = None
return state
def __setstate__( self , A ) -> Union[str, Any]:
'''simple docstring'''
a = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
a = {}
a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowerCAmelCase_ ( self , A ) -> List[str]:
'''simple docstring'''
return self.sp_model.encode(A , out_type=A )
def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.piece_to_id(A )
def lowerCAmelCase_ ( self , A ) -> Optional[int]:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
a = self.sp_model.IdToPiece(A )
return token
def lowerCAmelCase_ ( self , A ) -> Union[str, Any]:
'''simple docstring'''
a = []
a = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def lowerCAmelCase_ ( self , A , A = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(A ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a = os.path.join(
A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , A )
elif not os.path.isfile(self.vocab_file ):
with open(A , "wb" ) as fi:
a = self.sp_model.serialized_model_proto()
fi.write(A )
return (out_vocab_file,)
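# A short usage sketch for the SentencePiece-backed tokenizer defined above; the
# checkpoint id comes from this file's pretrained map, while the ReformerTokenizer
# class name is the standard transformers one and is assumed here.
import os

from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
tokens = tokenizer.tokenize("Crime and Punishment")  # SentencePiece pieces, e.g. ['▁Crime', ...]
print(tokenizer.convert_tokens_to_ids(tokens))
print(tokenizer.convert_tokens_to_string(tokens))
os.makedirs("./reformer_vocab", exist_ok=True)
tokenizer.save_vocabulary("./reformer_vocab")  # writes spiece.model via the save method above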
| 180
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
a : int = 8
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=BITS ) -> Any:
'''simple docstring'''
snake_case_ = x.device
snake_case_ = (x * 255).int().clamp(0, 255 )
snake_case_ = 2 ** torch.arange(bits - 1, -1, -1, device=_lowerCAmelCase )
snake_case_ = rearrange(_lowerCAmelCase, '''d -> d 1 1''' )
snake_case_ = rearrange(_lowerCAmelCase, '''b c h w -> b c 1 h w''' )
snake_case_ = ((x & mask) != 0).float()
snake_case_ = rearrange(_lowerCAmelCase, '''b c d h w -> b (c d) h w''' )
snake_case_ = bits * 2 - 1
return bits
def __magic_name__ ( __UpperCAmelCase, __UpperCAmelCase=BITS ) -> Optional[Any]:
'''simple docstring'''
snake_case_ = x.device
snake_case_ = (x > 0).int()
    snake_case_ = 2 ** torch.arange(bits - 1, -1, -1, device=_lowerCAmelCase, dtype=torch.int32 )
snake_case_ = rearrange(_lowerCAmelCase, '''d -> d 1 1''' )
snake_case_ = rearrange(_lowerCAmelCase, '''b (c d) h w -> b c d h w''', d=8 )
snake_case_ = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''' )
return (dec / 255).clamp(0.0, 1.0 )
def __magic_name__ ( self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase = 0.0, __UpperCAmelCase = True, __UpperCAmelCase=None, __UpperCAmelCase = True, ) -> Union[DDIMSchedulerOutput, Tuple]:
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
'''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
snake_case_ = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
snake_case_ = self.alphas_cumprod[timestep]
snake_case_ = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
snake_case_ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
snake_case_ = self.bit_scale
if self.config.clip_sample:
snake_case_ = torch.clamp(_lowerCAmelCase, -scale, _lowerCAmelCase )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
snake_case_ = self._get_variance(_lowerCAmelCase, _lowerCAmelCase )
snake_case_ = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
snake_case_ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case_ = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
snake_case_ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
snake_case_ = model_output.device if torch.is_tensor(_lowerCAmelCase ) else 'cpu'
snake_case_ = torch.randn(model_output.shape, dtype=model_output.dtype, generator=_lowerCAmelCase ).to(_lowerCAmelCase )
snake_case_ = self._get_variance(_lowerCAmelCase, _lowerCAmelCase ) ** 0.5 * eta * noise
snake_case_ = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=_lowerCAmelCase, pred_original_sample=_lowerCAmelCase )
def __magic_name__ ( self, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase, __UpperCAmelCase="epsilon", __UpperCAmelCase=None, __UpperCAmelCase = True, ) -> Union[DDPMSchedulerOutput, Tuple]:
'''simple docstring'''
snake_case_ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case_ = torch.split(_lowerCAmelCase, sample.shape[1], dim=1 )
else:
snake_case_ = None
# 1. compute alphas, betas
snake_case_ = self.alphas_cumprod[t]
snake_case_ = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case_ = 1 - alpha_prod_t
snake_case_ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case_ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case_ = model_output
else:
raise ValueError(F"Unsupported prediction_type {prediction_type}." )
# 3. Clip "predicted x_0"
snake_case_ = self.bit_scale
if self.config.clip_sample:
snake_case_ = torch.clamp(_lowerCAmelCase, -scale, _lowerCAmelCase )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case_ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case_ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case_ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case_ = 0
if t > 0:
snake_case_ = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=_lowerCAmelCase ).to(model_output.device )
snake_case_ = (self._get_variance(_lowerCAmelCase, predicted_variance=_lowerCAmelCase ) ** 0.5) * noise
snake_case_ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_lowerCAmelCase, pred_original_sample=_lowerCAmelCase )
class a ( SCREAMING_SNAKE_CASE_ ):
def __init__( self : List[str] , lowercase_ : UNetaDConditionModel , lowercase_ : Union[DDIMScheduler, DDPMScheduler] , lowercase_ : Optional[float] = 1.0 , ):
super().__init__()
snake_case_ = bit_scale
snake_case_ = (
ddim_bit_scheduler_step if isinstance(_a , _a ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self : Tuple , lowercase_ : Optional[int] = 256 , lowercase_ : Optional[int] = 256 , lowercase_ : Optional[int] = 50 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , **lowercase_ : Dict , ):
snake_case_ = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=_a , )
snake_case_ = decimal_to_bits(_a ) * self.bit_scale
snake_case_ = latents.to(self.device )
self.scheduler.set_timesteps(_a )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
snake_case_ = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
snake_case_ = self.scheduler.step(_a , _a , _a ).prev_sample
snake_case_ = bits_to_decimal(_a )
if output_type == "pil":
snake_case_ = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
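# The helpers at the top of this file pack [0, 1] images into {-1, 1} bit planes and
# back; a self-contained re-implementation of that round trip (for illustration only,
# not the pipeline's own API) looks roughly like this:
import torch

def to_bits(x, bits=8):
    # x in [0, 1] -> bit planes in {-1, 1} of shape (B, C * bits, H, W)
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=x.device)
    planes = ((x.unsqueeze(2) & mask.view(1, 1, -1, 1, 1)) != 0).float()
    return planes.flatten(1, 2) * 2 - 1

def to_decimal(b, bits=8):
    # bit planes in {-1, 1} -> image in [0, 1], quantized to 8 bits
    x = (b > 0).int().reshape(b.shape[0], -1, bits, *b.shape[2:])
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=b.device, dtype=torch.int32)
    return (x * mask.view(1, 1, -1, 1, 1)).sum(2).div(255).clamp(0.0, 1.0)

img = torch.rand(2, 3, 4, 4)
assert torch.allclose(to_decimal(to_bits(img)), (img * 255).int() / 255)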
| 56
|
'''simple docstring'''
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_UpperCamelCase = data_utils.TransfoXLTokenizer
_UpperCamelCase = data_utils.TransfoXLCorpus
_UpperCamelCase = data_utils
_UpperCamelCase = data_utils
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ) -> Optional[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_lowerCAmelCase ,'rb' ) as fp:
__lowerCamelCase : Optional[Any] = pickle.load(_lowerCAmelCase ,encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCamelCase : Tuple = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
__lowerCamelCase : str = corpus.vocab.__dict__
torch.save(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' ,_lowerCAmelCase )
__lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(_lowerCAmelCase ,_lowerCAmelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCamelCase : int = os.path.abspath(_lowerCAmelCase )
__lowerCamelCase : Any = os.path.abspath(_lowerCAmelCase )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCamelCase : Optional[int] = TransfoXLConfig()
else:
__lowerCamelCase : Optional[int] = TransfoXLConfig.from_json_file(_lowerCAmelCase )
print(F'Building PyTorch model from configuration: {config}' )
__lowerCamelCase : List[str] = TransfoXLLMHeadModel(_lowerCAmelCase )
__lowerCamelCase : Dict = load_tf_weights_in_transfo_xl(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
# Save pytorch-model
__lowerCamelCase : List[str] = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
__lowerCamelCase : int = os.path.join(_lowerCAmelCase ,_lowerCAmelCase )
print(F'Save PyTorch model to {os.path.abspath(_lowerCAmelCase )}' )
torch.save(model.state_dict() ,_lowerCAmelCase )
print(F'Save configuration file to {os.path.abspath(_lowerCAmelCase )}' )
with open(_lowerCAmelCase ,'w' ,encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--tf_checkpoint_path',
default='',
type=str,
help='An optional path to a TensorFlow checkpoint path to be converted.',
)
parser.add_argument(
'--transfo_xl_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained BERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--transfo_xl_dataset_file',
default='',
type=str,
help='An optional dataset file to be converted in a vocabulary.',
)
_UpperCamelCase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
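# Illustrative direct call, mirroring the argument order of the __main__ block above
# (paths are hypothetical; pass "" to skip the branch you don't need):
convert_transfo_xl_checkpoint_to_pytorch(
    "./transfo-xl/model.ckpt",   # tf_checkpoint_path
    "./transfo-xl/config.json",  # transfo_xl_config_file
    "./transfo-xl-pytorch",      # pytorch_dump_folder_path
    "",                          # transfo_xl_dataset_file (skip corpus conversion)
)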
| 208
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
@require_tf
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any] , UpperCAmelCase_ : str) ->Optional[int]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"]):
lowerCamelCase__: str =model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
lowerCamelCase__: Optional[Any] ="sshleifer/tiny-gpt2"
lowerCamelCase__: Optional[int] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: Any =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: Any ="sgugger/tiny-distilbert-classification"
lowerCamelCase__: Optional[int] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , only_pretrain_model=UpperCAmelCase_ , )
lowerCamelCase__: str =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: str =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: List[Any] ="sshleifer/tiny-gpt2"
lowerCamelCase__: Dict =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: Optional[int] =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: List[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Dict ="sshleifer/tiny-gpt2"
lowerCamelCase__: Any =AutoConfig.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: List[str] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: str =TensorFlowBenchmark(UpperCAmelCase_ , [config])
lowerCamelCase__: Any =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : str) ->Optional[Any]:
'''simple docstring'''
lowerCamelCase__: int ="sshleifer/tiny-gpt2"
lowerCamelCase__: Dict =AutoConfig.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: Optional[int] =TensorFlowBenchmark(UpperCAmelCase_ , [config])
lowerCamelCase__: Tuple =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] ="sshleifer/tiny-gpt2"
lowerCamelCase__: Any =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: int =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: Any =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: Optional[int] ="sshleifer/tiny-gpt2"
lowerCamelCase__: Any =AutoConfig.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: Optional[int] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: Union[str, Any] =TensorFlowBenchmark(UpperCAmelCase_ , [config])
lowerCamelCase__: Tuple =benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str ="patrickvonplaten/t5-tiny-random"
lowerCamelCase__: str =AutoConfig.from_pretrained(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: int =TensorFlowBenchmark(UpperCAmelCase_ , configs=[config])
lowerCamelCase__: List[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("GPU")) == 0 , "Cannot do xla on CPU.")
def SCREAMING_SNAKE_CASE_ (self : Any) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] ="sshleifer/tiny-gpt2"
lowerCamelCase__: Tuple =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , training=UpperCAmelCase_ , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , use_xla=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: Tuple =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->int:
'''simple docstring'''
lowerCamelCase__: List[Any] ="sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__: Any =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase_ , save_to_csv=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(UpperCAmelCase_ , "inf_time.csv") , inference_memory_csv_file=os.path.join(UpperCAmelCase_ , "inf_mem.csv") , env_info_csv_file=os.path.join(UpperCAmelCase_ , "env.csv") , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: int =TensorFlowBenchmark(UpperCAmelCase_)
benchmark.run()
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , "inf_time.csv")).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , "inf_mem.csv")).exists())
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , "env.csv")).exists())
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(UpperCAmelCase_ : Optional[int]):
self.assertTrue(hasattr(UpperCAmelCase_ , "sequential"))
self.assertTrue(hasattr(UpperCAmelCase_ , "cumulative"))
self.assertTrue(hasattr(UpperCAmelCase_ , "current"))
self.assertTrue(hasattr(UpperCAmelCase_ , "total"))
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCamelCase__: Any =TensorFlowBenchmarkArguments(
models=[MODEL_ID] , inference=UpperCAmelCase_ , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(UpperCAmelCase_ , "log.txt") , log_print=UpperCAmelCase_ , trace_memory_line_by_line=UpperCAmelCase_ , eager_mode=UpperCAmelCase_ , multi_process=UpperCAmelCase_ , )
lowerCamelCase__: List[Any] =TensorFlowBenchmark(UpperCAmelCase_)
lowerCamelCase__: Tuple =benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
self.assertTrue(Path(os.path.join(UpperCAmelCase_ , "log.txt")).exists())
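# The same benchmark machinery can be driven outside the test class; a minimal sketch
# (the model id and sizes mirror the tests above):
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(benchmark_args).run()
print(results.time_inference_result)
print(results.memory_inference_result)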
| 355
|
def longest_common_subsequence( x , y ) -> tuple:
    """simple docstring"""
    assert x is not None
    assert y is not None
    m = len(x)
    n = len(y)
    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1 )]  # noqa: E741
    for i in range(1 , m + 1 ):
        for j in range(1 , n + 1 ):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
    seq = ""
    i , j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0
        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1
    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"
    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
| 273
| 0
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = original_name.split("." )[0]
lowerCAmelCase : Any = key.split("." )
lowerCAmelCase : List[str] = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 2] )
lowerCAmelCase : Tuple = int(key_list[key_list.index(SCREAMING_SNAKE_CASE ) - 1] )
lowerCAmelCase : str = orig_block_num - offset
lowerCAmelCase : Optional[int] = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : int = OrderedDict()
lowerCAmelCase , lowerCAmelCase : Tuple = 0, 0
for key, value in state_dict.items():
if key.startswith("network" ):
lowerCAmelCase : Optional[Any] = key.replace("network" , "poolformer.encoder" )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("bias" ) and "patch_embed" not in key:
patch_emb_offset += 1
lowerCAmelCase : Any = key[: key.find("proj" )]
lowerCAmelCase : Any = key.replace(SCREAMING_SNAKE_CASE , f"""patch_embeddings.{total_embed_found}.""" )
lowerCAmelCase : int = key.replace("proj" , "projection" )
if key.endswith("bias" ):
total_embed_found += 1
if "patch_embeddings" in key:
lowerCAmelCase : Optional[int] = "poolformer.encoder." + key
if "mlp.fc1" in key:
lowerCAmelCase : Optional[int] = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "mlp.fc1" , "output.conv1" )
if "mlp.fc2" in key:
lowerCAmelCase : List[str] = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "mlp.fc2" , "output.conv2" )
if "norm1" in key:
lowerCAmelCase : Optional[Any] = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "norm1" , "before_norm" )
if "norm2" in key:
lowerCAmelCase : int = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "norm2" , "after_norm" )
if "layer_scale_1" in key:
lowerCAmelCase : Tuple = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "layer_scale_1" , "layer_scale_1" )
if "layer_scale_2" in key:
lowerCAmelCase : List[Any] = replace_key_with_offset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , "layer_scale_2" , "layer_scale_2" )
if "head" in key:
lowerCAmelCase : Optional[int] = key.replace("head" , "classifier" )
lowerCAmelCase : List[str] = value
return new_state_dict
def a__ ( ):
'''simple docstring'''
lowerCAmelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
@torch.no_grad()
def a__ ( SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
lowerCAmelCase : str = PoolFormerConfig()
# set attributes based on model_name
lowerCAmelCase : List[str] = "huggingface/label-files"
lowerCAmelCase : str = model_name[-3:]
lowerCAmelCase : List[Any] = 1_0_0_0
lowerCAmelCase : Optional[int] = "imagenet-1k-id2label.json"
lowerCAmelCase : int = (1, 1_0_0_0)
# set config attributes
lowerCAmelCase : int = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
lowerCAmelCase : List[Any] = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowerCAmelCase : List[Any] = idalabel
lowerCAmelCase : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
lowerCAmelCase : Union[str, Any] = [2, 2, 6, 2]
lowerCAmelCase : Tuple = [6_4, 1_2_8, 3_2_0, 5_1_2]
lowerCAmelCase : str = 4.0
lowerCAmelCase : Dict = 0.9
elif size == "s24":
lowerCAmelCase : Dict = [4, 4, 1_2, 4]
lowerCAmelCase : Dict = [6_4, 1_2_8, 3_2_0, 5_1_2]
lowerCAmelCase : Optional[int] = 4.0
lowerCAmelCase : List[str] = 0.9
elif size == "s36":
lowerCAmelCase : List[Any] = [6, 6, 1_8, 6]
lowerCAmelCase : int = [6_4, 1_2_8, 3_2_0, 5_1_2]
lowerCAmelCase : Dict = 4.0
lowerCAmelCase : int = 1E-6
lowerCAmelCase : Union[str, Any] = 0.9
elif size == "m36":
lowerCAmelCase : List[Any] = [6, 6, 1_8, 6]
lowerCAmelCase : Dict = [9_6, 1_9_2, 3_8_4, 7_6_8]
lowerCAmelCase : Tuple = 4.0
lowerCAmelCase : int = 1E-6
lowerCAmelCase : Optional[Any] = 0.95
elif size == "m48":
lowerCAmelCase : Tuple = [8, 8, 2_4, 8]
lowerCAmelCase : Tuple = [9_6, 1_9_2, 3_8_4, 7_6_8]
lowerCAmelCase : str = 4.0
lowerCAmelCase : Optional[Any] = 1E-6
lowerCAmelCase : Tuple = 0.95
else:
raise ValueError(f"""Size {size} not supported""" )
# load image processor
lowerCAmelCase : Optional[Any] = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
# Prepare image
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : str = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors="pt" ).pixel_values
logger.info(f"""Converting model {model_name}...""" )
# load original state dict
lowerCAmelCase : Dict = torch.load(SCREAMING_SNAKE_CASE , map_location=torch.device("cpu" ) )
# rename keys
lowerCAmelCase : str = rename_keys(SCREAMING_SNAKE_CASE )
# create HuggingFace model and load state dict
lowerCAmelCase : List[Any] = PoolFormerForImageClassification(SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
# Define image processor
lowerCAmelCase : Any = PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[Any] = image_processor(images=prepare_img() , return_tensors="pt" ).pixel_values
# forward pass
lowerCAmelCase : List[Any] = model(SCREAMING_SNAKE_CASE )
lowerCAmelCase : Dict = outputs.logits
# define expected logit slices for different models
if size == "s12":
lowerCAmelCase : Optional[Any] = torch.tensor([-0.3_045, -0.6_758, -0.4_869] )
elif size == "s24":
lowerCAmelCase : Any = torch.tensor([0.4_402, -0.1_374, -0.8_045] )
elif size == "s36":
lowerCAmelCase : int = torch.tensor([-0.6_080, -0.5_133, -0.5_898] )
elif size == "m36":
lowerCAmelCase : str = torch.tensor([0.3_952, 0.2_263, -1.2_668] )
elif size == "m48":
lowerCAmelCase : Optional[Any] = torch.tensor([0.1_167, -0.0_656, -0.3_423] )
else:
raise ValueError(f"""Size {size} not supported""" )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-2 )
# finally, save model and image processor
logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''poolformer_s12''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
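# Illustrative direct call, mirroring the argument order used above (the checkpoint
# path and output folder are hypothetical):
convert_poolformer_checkpoint(
    "poolformer_s12",        # model_name, selects the depths/hidden sizes above
    "./poolformer_s12.pth",  # checkpoint_path
    "./poolformer_s12_hf",   # pytorch_dump_folder_path
)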
| 108
|
'''simple docstring'''
import argparse
from collections import defaultdict
import yaml
a : str = "docs/source/en/_toctree.yml"
def lowercase ( __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Dict = defaultdict(__magic_name__ )
for doc in model_doc:
counts[doc["local"]] += 1
UpperCAmelCase : List[Any] = [key for key, value in counts.items() if value > 1]
UpperCAmelCase : Dict = []
for duplicate_key in duplicates:
UpperCAmelCase : Union[str, Any] = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key} )
if len(__magic_name__ ) > 1:
raise ValueError(
F"{duplicate_key} is present several times in the documentation table of content at "
"`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
"others." )
# Only add this once
new_doc.append({"local": duplicate_key, "title": titles[0]} )
    # Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1] )
# Sort
return sorted(__magic_name__ , key=lambda __magic_name__ : s["title"].lower() )
def lowercase ( __magic_name__=False ):
'''simple docstring'''
with open(__magic_name__ , encoding="utf-8" ) as f:
UpperCAmelCase : Any = yaml.safe_load(f.read() )
# Get to the API doc
UpperCAmelCase : Optional[int] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
UpperCAmelCase : Union[str, Any] = content[api_idx]["sections"]
# Then to the model doc
UpperCAmelCase : Any = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
UpperCAmelCase : str = api_doc[model_idx]["sections"]
UpperCAmelCase : Any = [(idx, section) for idx, section in enumerate(__magic_name__ ) if "sections" in section]
UpperCAmelCase : Optional[int] = False
for idx, modality_doc in modalities_docs:
UpperCAmelCase : int = modality_doc["sections"]
UpperCAmelCase : int = clean_model_doc_toc(__magic_name__ )
if old_modality_doc != new_modality_doc:
UpperCAmelCase : int = True
if overwrite:
UpperCAmelCase : Dict = new_modality_doc
if diff:
if overwrite:
UpperCAmelCase : Any = model_doc
UpperCAmelCase : Any = api_doc
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(yaml.dump(__magic_name__ , allow_unicode=__magic_name__ ) )
else:
raise ValueError(
"The model doc part of the table of content is not properly sorted, run `make style` to fix this." )
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
a : Optional[Any] = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
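# Illustrative direct calls (the function is invoked positionally above, so the flag
# maps onto its single overwrite argument):
check_model_doc(False)  # check only; raises if the model toc is unsorted or has duplicates
check_model_doc(True)   # rewrite docs/source/en/_toctree.yml in place, as --fix_and_overwrite does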
| 311
| 0
|
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : str = ['''image_processor''', '''tokenizer''']
UpperCamelCase__ : List[Any] = '''OwlViTImageProcessor'''
UpperCamelCase__ : str = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _A , )
__SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
__SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_A , _A )
def __call__( self , _A=None , _A=None , _A=None , _A="max_length" , _A="np" , **_A ):
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
'You have to specify at least one text or query image or image. All three cannot be none.' )
if text is not None:
if isinstance(_A , _A ) or (isinstance(_A , _A ) and not isinstance(text[0] , _A )):
__SCREAMING_SNAKE_CASE = [self.tokenizer(_A , padding=_A , return_tensors=_A , **_A )]
elif isinstance(_A , _A ) and isinstance(text[0] , _A ):
__SCREAMING_SNAKE_CASE = []
# Maximum number of queries across batch
__SCREAMING_SNAKE_CASE = max([len(_A ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(_A ) != max_num_queries:
__SCREAMING_SNAKE_CASE = t + [' '] * (max_num_queries - len(_A ))
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , padding=_A , return_tensors=_A , **_A )
encodings.append(_A )
else:
raise TypeError('Input text should be a string, a list of strings or a nested list of strings' )
if return_tensors == "np":
__SCREAMING_SNAKE_CASE = np.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = np.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
__SCREAMING_SNAKE_CASE = jnp.concatenate([encoding['input_ids'] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = jnp.concatenate([encoding['attention_mask'] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
__SCREAMING_SNAKE_CASE = torch.cat([encoding['input_ids'] for encoding in encodings] , dim=0 )
__SCREAMING_SNAKE_CASE = torch.cat([encoding['attention_mask'] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
__SCREAMING_SNAKE_CASE = tf.stack([encoding['input_ids'] for encoding in encodings] , axis=0 )
__SCREAMING_SNAKE_CASE = tf.stack([encoding['attention_mask'] for encoding in encodings] , axis=0 )
else:
raise ValueError('Target return tensor type could not be returned' )
__SCREAMING_SNAKE_CASE = BatchEncoding()
__SCREAMING_SNAKE_CASE = input_ids
__SCREAMING_SNAKE_CASE = attention_mask
if query_images is not None:
__SCREAMING_SNAKE_CASE = BatchEncoding()
__SCREAMING_SNAKE_CASE = self.image_processor(
_A , return_tensors=_A , **_A ).pixel_values
__SCREAMING_SNAKE_CASE = query_pixel_values
if images is not None:
__SCREAMING_SNAKE_CASE = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None and images is not None:
__SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
__SCREAMING_SNAKE_CASE = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**_A ) , tensor_type=_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.image_processor.post_process(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.image_processor.post_process_object_detection(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@property
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _A , )
return self.image_processor_class
@property
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _A , )
return self.image_processor
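# A minimal sketch of driving this processor for text-conditioned detection; the
# OwlViTProcessor class name and checkpoint are the standard transformers ones and
# are assumed here, since the snippet's own class name is rewritten.
import requests
from PIL import Image
from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']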
| 350
|
import os


def solution( a__ = "input.txt" ) -> int:
    with open(os.path.join(os.path.dirname(__file__ ) , a__ ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(',' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
    print(F'''{solution() = }''')
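# The same right/up/down dynamic programme, re-implemented on an in-memory grid as a
# worked check (independent of input.txt; the 3x3 values are chosen so the best path
# is 201 + 96 + 342 = 639):
def min_path_sum(matrix):
    rows, cols = len(matrix), len(matrix[0])
    best = [row[0] for row in matrix]  # a path may start anywhere in column 0
    for j in range(1, cols):
        best = [best[i] + matrix[i][j] for i in range(rows)]  # step right
        for i in range(1, rows):  # sweep down
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):  # sweep up
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)  # a path may end anywhere in the last column

print(min_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]))  # 639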
| 118
| 0
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=lowerCamelCase__ )
class lowercase ( lowerCamelCase__ ):
lowercase_ : Optional[int] =field(default='''audio-classification''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
lowercase_ : List[str] =Features({'''audio''': Audio()} )
lowercase_ : str =Features({'''labels''': ClassLabel} )
lowercase_ : List[Any] ='''audio'''
lowercase_ : List[Any] ='''labels'''
def A__ ( self ,A__):
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.')
if not isinstance(features[self.label_column] ,__snake_case):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.')
lowercase = copy.deepcopy(self)
lowercase = self.label_schema.copy()
lowercase = features[self.label_column]
lowercase = label_schema
return task_template
@property
def A__ ( self):
return {
self.audio_column: "audio",
self.label_column: "labels",
}
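# A sketch of aligning such a task template with a dataset's features; the class and
# method names follow the `datasets` task API and are assumptions here, since the
# snippet's own names are rewritten.
from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["negative", "positive"])})
template = AudioClassification(audio_column="audio", label_column="labels")
template = template.align_with_features(features)  # pins label_schema to the dataset's ClassLabel
print(template.label_schema)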
| 101
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCAmelCase: List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class a__( lowerCamelCase__ ):
def __init__( self : Any , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : List[Any]=None , __snake_case : Optional[Any]=1 ):
a : Union[str, Any] = tokenizer
a : Union[str, Any] = dataset
a : Any = len(__snake_case ) if n_tasks is None else n_tasks
a : List[str] = n_copies
def __iter__( self : str ):
a : List[Any] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['prompt'].strip() )
a : Dict = self.tokenizer(__snake_case , padding=__snake_case , return_tensors='pt' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : Optional[Any] , __snake_case : str ):
a : Dict = start_length
a : Dict = eof_strings
a : str = tokenizer
def __call__( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : Any , **__snake_case : Union[str, Any] ):
a : int = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
a : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(__snake_case )
def lowerCamelCase__ ( _A ):
a : Optional[Any] = re.split('(%s)' % '|'.join(_A ) , _A )
# last string should be ""
return "".join(string_list[:-2] )
def lowerCamelCase__ ( _A , _A , _A , _A , _A , _A=20 , **_A ):
a : Optional[Any] = defaultdict(_A ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(_A ) ):
with torch.no_grad():
a : Optional[Any] = batch['ids'].shape[-1]
a : Optional[Any] = accelerator.unwrap_model(_A ).generate(
input_ids=batch['ids'][:, : batch['input_len']] , num_return_sequences=_A , **_A )
# each task is generated batch_size times
a : Tuple = batch['task_id'].repeat(_A )
a : List[Any] = accelerator.pad_across_processes(
_A , dim=1 , pad_index=tokenizer.pad_token_id )
a , a : Union[str, Any] = accelerator.gather((generated_tokens, generated_tasks) )
a : List[str] = generated_tokens.cpu().numpy()
a : int = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(_A , _A ):
gen_token_dict[task].append(_A )
a : Any = [[] for _ in range(_A )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
a : Optional[int] = tokenizer.decode(_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
code_gens[task].append(remove_last_block(_A ) )
return code_gens
def lowerCamelCase__ ( ):
# Setup configuration
a : Dict = HfArgumentParser(_A )
a : Any = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
a : List[Any] = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
a : int = 'false'
if args.num_workers is None:
a : Dict = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
a : List[Any] = Accelerator()
set_seed(args.seed , device_specific=_A )
# Load model and tokenizer
a : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a : str = tokenizer.eos_token
a : List[Any] = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
a : Optional[Any] = {
'do_sample': args.do_sample,
'temperature': args.temperature,
'max_new_tokens': args.max_new_tokens,
'top_p': args.top_p,
'top_k': args.top_k,
'stopping_criteria': StoppingCriteriaList([EndOfFunctionCriteria(0 , _A , _A )] ),
}
# Load evaluation dataset and metric
a : Optional[int] = load_dataset('openai_humaneval' )
a : Optional[Any] = load_metric('code_eval' )
a : Optional[int] = args.num_tasks if args.num_tasks is not None else len(human_eval['test'] )
a : Optional[Any] = args.n_samples // args.batch_size
a : Any = TokenizedDataset(_A , human_eval['test'] , n_copies=_A , n_tasks=_A )
# do not confuse args.batch_size, which is actually the num_return_sequences
a : int = DataLoader(_A , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
a : int = code_eval_metric.compute(references=[''] , predictions=[['']] )
except ValueError as exception:
print(
'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
' flag to enable code evaluation.' )
raise exception
a , a : int = accelerator.prepare(_A , _A )
a : int = complete_code(
_A , _A , _A , _A , n_tasks=_A , batch_size=args.batch_size , **_A , )
if accelerator.is_main_process:
a : List[str] = []
for task in tqdm(range(_A ) ):
a : int = human_eval['test'][task]['test']
a : int = f"""check({human_eval["test"][task]["entry_point"]})"""
references.append('\n' + test_func + '\n' + entry_point )
# Evaluate completions with "code_eval" metric
a , a : Tuple = code_eval_metric.compute(
references=_A , predictions=_A , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , 'w' ) as fp:
json.dump(_A , _A )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 297
| 0
|
"""simple docstring"""
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
lowerCAmelCase__ = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class _lowerCamelCase ( unittest.TestCase ):
def __init__(self , __a , __a=7 , __a=3 , __a=18 , __a=30 , __a=4_00 , __a=None , __a=True , __a=True , __a=None , ) -> Dict:
UpperCamelCase = size if size is not None else {"height": 20, "width": 20}
UpperCamelCase = parent
UpperCamelCase = batch_size
UpperCamelCase = num_channels
UpperCamelCase = image_size
UpperCamelCase = min_resolution
UpperCamelCase = max_resolution
UpperCamelCase = size
UpperCamelCase = do_normalize
UpperCamelCase = do_convert_rgb
UpperCamelCase = [5_12, 10_24, 20_48, 40_96]
UpperCamelCase = patch_size if patch_size is not None else {"height": 16, "width": 16}
def snake_case_ (self ) -> List[Any]:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def snake_case_ (self ) -> int:
UpperCamelCase = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
UpperCamelCase = Image.open(requests.get(__a , stream=__a ).raw ).convert("RGB" )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ (self ) -> Tuple:
UpperCamelCase = PixaStructImageProcessingTester(self )
@property
def snake_case_ (self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_convert_rgb" ) )
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = self.image_processor_tester.prepare_dummy_image()
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
UpperCamelCase = 20_48
UpperCamelCase = image_processor(__a , return_tensors="pt" , max_patches=__a )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) )
def snake_case_ (self ) -> Optional[Any]:
# Initialize image_processor
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ (self ) -> Any:
# Initialize image_processor
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
UpperCamelCase = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__a ):
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
UpperCamelCase = "Hello"
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a , header_text=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
__a , return_tensors="pt" , max_patches=__a , header_text=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ (self ) -> List[str]:
# Initialize image_processor
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , numpify=__a )
for image in image_inputs:
self.assertIsInstance(__a , np.ndarray )
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
def snake_case_ (self ) -> Dict:
# Initialize image_processor
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a , torchify=__a )
for image in image_inputs:
self.assertIsInstance(__a , torch.Tensor )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason="`Pix2StructImageProcessor` requires `torch>=1.11.0`." , )
@require_torch
@require_vision
class _lowerCamelCase ( _lowercase , unittest.TestCase ):
UpperCAmelCase_ = PixaStructImageProcessor if is_vision_available() else None
def snake_case_ (self ) -> Union[str, Any]:
UpperCamelCase = PixaStructImageProcessingTester(self , num_channels=4 )
UpperCamelCase = 3
@property
def snake_case_ (self ) -> List[str]:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case_ (self ) -> Optional[int]:
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__a , "do_normalize" ) )
self.assertTrue(hasattr(__a , "do_convert_rgb" ) )
def snake_case_ (self ) -> List[Any]:
# Initialize image_processor
UpperCamelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__a )
for image in image_inputs:
self.assertIsInstance(__a , Image.Image )
# Test not batched input
UpperCamelCase = (
(self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
UpperCamelCase = image_processor(
image_inputs[0] , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (1, max_patch, expected_hidden_dim) , )
# Test batched
UpperCamelCase = image_processor(
__a , return_tensors="pt" , max_patches=__a ).flattened_patches
self.assertEqual(
encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
| 244
|
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = SwinConfig(
embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
UpperCamelCase = DetaConfig(
backbone_config=_SCREAMING_SNAKE_CASE , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=_SCREAMING_SNAKE_CASE , with_box_refine=_SCREAMING_SNAKE_CASE , two_stage=_SCREAMING_SNAKE_CASE , )
# set labels
UpperCamelCase = "huggingface/label-files"
if "o365" in model_name:
UpperCamelCase = 366
UpperCamelCase = "object365-id2label.json"
else:
UpperCamelCase = 91
UpperCamelCase = "coco-detection-id2label.json"
UpperCamelCase = num_labels
UpperCamelCase = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) ) , "r" ) )
UpperCamelCase = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase = idalabel
UpperCamelCase = {v: k for k, v in idalabel.items()}
return config
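# Build the (original key, HF key) rename pairs for the Swin backbone, transformer encoder and decoder weights.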
def a__ ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.norm2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias", F"model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias") )
if i < 3:
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.reduction.weight", F"model.backbone.model.encoder.layers.{i}.downsample.reduction.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.weight", F"model.backbone.model.encoder.layers.{i}.downsample.norm.weight") )
rename_keys.append((F"backbone.0.body.layers.{i}.downsample.norm.bias", F"model.backbone.model.encoder.layers.{i}.downsample.norm.bias") )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight", F"model.encoder.layers.{i}.self_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias", F"model.encoder.layers.{i}.self_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.weight", F"model.encoder.layers.{i}.self_attn.attention_weights.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.attention_weights.bias", F"model.encoder.layers.{i}.self_attn.attention_weights.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.weight", F"model.encoder.layers.{i}.self_attn.value_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.value_proj.bias", F"model.encoder.layers.{i}.self_attn.value_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.weight", F"model.encoder.layers.{i}.self_attn.output_proj.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.self_attn.output_proj.bias", F"model.encoder.layers.{i}.self_attn.output_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.weight", F"model.encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"model.encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"model.encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"model.encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"model.encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"model.encoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"model.encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"model.encoder.layers.{i}.final_layer_norm.bias") )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias", F"model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.weight", F"model.decoder.layers.{i}.encoder_attn.attention_weights.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.attention_weights.bias", F"model.decoder.layers.{i}.encoder_attn.attention_weights.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.weight", F"model.decoder.layers.{i}.encoder_attn.value_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.value_proj.bias", F"model.decoder.layers.{i}.encoder_attn.value_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.weight", F"model.decoder.layers.{i}.encoder_attn.output_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.cross_attn.output_proj.bias", F"model.decoder.layers.{i}.encoder_attn.output_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.weight", F"model.decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"model.decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"model.decoder.layers.{i}.self_attn.out_proj.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"model.decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.weight", F"model.decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm2.bias", F"model.decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"model.decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"model.decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"model.decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"model.decoder.layers.{i}.fc2.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"model.decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"model.decoder.layers.{i}.final_layer_norm.bias") )
# fmt: on
return rename_keys
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = dct.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = val
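# Split the fused Swin attention qkv weight/bias into separate query, key and value state-dict entries.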
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight" )
UpperCamelCase = state_dict.pop(F"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:dim, :]
UpperCamelCase = in_proj_bias[: dim]
UpperCamelCase = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase = in_proj_bias[
dim : dim * 2
]
UpperCamelCase = in_proj_weight[
-dim :, :
]
UpperCamelCase = in_proj_bias[-dim :]
# fmt: on
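# Same split for the decoder self-attention in_proj weights and biases.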
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
UpperCamelCase = state_dict.pop(F"transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase = in_proj_weight[:hidden_size, :]
UpperCamelCase = in_proj_bias[:hidden_size]
UpperCamelCase = in_proj_weight[
hidden_size : hidden_size * 2, :
]
UpperCamelCase = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase = in_proj_weight[-hidden_size:, :]
UpperCamelCase = in_proj_bias[-hidden_size:]
def a__ ( ):
"""simple docstring"""
UpperCamelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
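# Convert an original DETA checkpoint: rename and split its weights, verify outputs on a test image, then optionally save and push.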
@torch.no_grad()
def a__ ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = get_deta_config(_SCREAMING_SNAKE_CASE )
# load original state dict
if model_name == "deta-swin-large":
UpperCamelCase = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
UpperCamelCase = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F"Model name {model_name} not supported" )
UpperCamelCase = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )["model"]
# original state dict
for name, param in state_dict.items():
print(_SCREAMING_SNAKE_CASE , param.shape )
# rename keys
UpperCamelCase = create_rename_keys(_SCREAMING_SNAKE_CASE )
for src, dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
read_in_swin_q_k_v(_SCREAMING_SNAKE_CASE , config.backbone_config )
read_in_decoder_q_k_v(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
UpperCamelCase = state_dict.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = val
if "input_proj" in key:
UpperCamelCase = state_dict.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
UpperCamelCase = state_dict.pop(_SCREAMING_SNAKE_CASE )
UpperCamelCase = val
# finally, create HuggingFace model and load state dict
UpperCamelCase = DetaForObjectDetection(_SCREAMING_SNAKE_CASE )
model.load_state_dict(_SCREAMING_SNAKE_CASE )
model.eval()
UpperCamelCase = "cuda" if torch.cuda.is_available() else "cpu"
model.to(_SCREAMING_SNAKE_CASE )
# load image processor
UpperCamelCase = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
UpperCamelCase = prepare_img()
UpperCamelCase = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
UpperCamelCase = encoding["pixel_values"]
UpperCamelCase = model(pixel_values.to(_SCREAMING_SNAKE_CASE ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
UpperCamelCase = torch.tensor(
[[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] )
UpperCamelCase = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] )
elif model_name == "deta-swin-large-o365":
UpperCamelCase = torch.tensor(
[[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] )
UpperCamelCase = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(_SCREAMING_SNAKE_CASE ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(_SCREAMING_SNAKE_CASE ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F"Saving PyTorch model and processor to {pytorch_dump_folder_path}..." )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
processor.save_pretrained(_SCREAMING_SNAKE_CASE )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F"jozhang97/{model_name}" )
processor.push_to_hub(F"jozhang97/{model_name}" )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
type=str,
default='''deta-swin-large''',
choices=['''deta-swin-large''', '''deta-swin-large-o365'''],
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
help='''Path to the folder to output PyTorch model.''',
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
lowerCAmelCase__ = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 244
| 1
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__lowerCamelCase : Union[str, Any] = TypeVar('''T''')
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (position - 1) // 2
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (2 * position) + 1
def _snake_case ( lowerCAmelCase : int ):
"""simple docstring"""
return (2 * position) + 2
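# Binary min-heap keyed by weight, with a position map so update_key can re-heapify a single element; used by Prim's algorithm below.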
class a__ ( Generic[T] ):
def __init__( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : list[tuple[T, int]] = []
SCREAMING_SNAKE_CASE_ : dict[T, int] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __len__( self : List[str] ):
"""simple docstring"""
return self.elements
def __repr__( self : Dict ):
"""simple docstring"""
return str(self.heap )
def __UpperCamelCase ( self : int ):
"""simple docstring"""
return self.elements == 0
def __UpperCamelCase ( self : str,_A : T,_A : int ):
"""simple docstring"""
self.heap.append((elem, weight) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.elements
self.elements += 1
self._bubble_up(_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
if self.elements > 1:
self._swap_nodes(0,self.elements - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.heap[0]
self._bubble_down(_A )
return elem
def __UpperCamelCase ( self : Union[str, Any],_A : T,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = self.position_map[elem]
SCREAMING_SNAKE_CASE_ : int = (elem, weight)
if position > 0:
SCREAMING_SNAKE_CASE_ : int = get_parent_position(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_A )
else:
self._bubble_down(_A )
else:
self._bubble_down(_A )
def __UpperCamelCase ( self : List[Any],_A : T ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = self.position_map[elem]
if curr_pos == 0:
return None
SCREAMING_SNAKE_CASE_ : Tuple = get_parent_position(_A )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_A,_A )
return self._bubble_up(_A )
return None
def __UpperCamelCase ( self : Optional[Any],_A : T ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.position_map[elem]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[curr_pos]
SCREAMING_SNAKE_CASE_ : Optional[int] = get_child_left_position(_A )
SCREAMING_SNAKE_CASE_ : int = get_child_right_position(_A )
if child_left_position < self.elements and child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.heap[child_left_position]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
if child_left_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
else:
return None
if child_right_position < self.elements:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_A,_A )
return self._bubble_down(_A )
return None
def __UpperCamelCase ( self : Optional[int],_A : int,_A : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ : List[Any] = self.heap[nodea_pos][0]
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
SCREAMING_SNAKE_CASE_ : Tuple = nodea_pos
SCREAMING_SNAKE_CASE_ : List[Any] = nodea_pos
class a__ ( Generic[T] ):
def __init__( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[T, dict[T, int]] = {}
SCREAMING_SNAKE_CASE_ : int = 0
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return str(self.connections )
def __len__( self : str ):
"""simple docstring"""
return self.nodes
def __UpperCamelCase ( self : Tuple,_A : T ):
"""simple docstring"""
if node not in self.connections:
SCREAMING_SNAKE_CASE_ : Optional[int] = {}
self.nodes += 1
def __UpperCamelCase ( self : Tuple,_A : T,_A : T,_A : int ):
"""simple docstring"""
self.add_node(_A )
self.add_node(_A )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = weight
SCREAMING_SNAKE_CASE_ : Union[str, Any] = weight
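# Prim's algorithm: grow a minimum spanning tree, returning distance and parent maps for every node.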
def _snake_case ( lowerCAmelCase : GraphUndirectedWeighted[T] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : dict[T, int] = {node: maxsize for node in graph.connections}
SCREAMING_SNAKE_CASE_ : dict[T, T | None] = {node: None for node in graph.connections}
SCREAMING_SNAKE_CASE_ : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase , lowerCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
SCREAMING_SNAKE_CASE_ : Optional[Any] = priority_queue.extract_min()
SCREAMING_SNAKE_CASE_ : Dict = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ : int = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ : Optional[int] = node
# running prim's algorithm
while not priority_queue.is_empty():
SCREAMING_SNAKE_CASE_ : Any = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
SCREAMING_SNAKE_CASE_ : Dict = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase , dist[neighbour] )
SCREAMING_SNAKE_CASE_ : int = node
return dist, parent
| 18
|
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    # Sieve of Eratosthenes: primes strictly below max_number
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
| 180
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCamelCase : int =logging.get_logger(__name__)
# TODO: upload to AWS
lowerCamelCase : Optional[Any] ={
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class __a ( A__ ):
_lowerCAmelCase : List[Any] = '''retribert'''
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any]=3_05_22 , SCREAMING_SNAKE_CASE : int=7_68 , SCREAMING_SNAKE_CASE : int=8 , SCREAMING_SNAKE_CASE : Dict=12 , SCREAMING_SNAKE_CASE : List[str]=30_72 , SCREAMING_SNAKE_CASE : Dict="gelu" , SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE : Dict=0.1 , SCREAMING_SNAKE_CASE : Tuple=5_12 , SCREAMING_SNAKE_CASE : str=2 , SCREAMING_SNAKE_CASE : Tuple=0.0_2 , SCREAMING_SNAKE_CASE : int=1e-1_2 , SCREAMING_SNAKE_CASE : str=True , SCREAMING_SNAKE_CASE : str=1_28 , SCREAMING_SNAKE_CASE : Dict=0 , **SCREAMING_SNAKE_CASE : str , ):
'''simple docstring'''
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Tuple = vocab_size
UpperCamelCase__ : List[Any] = hidden_size
UpperCamelCase__ : Tuple = num_hidden_layers
UpperCamelCase__ : Optional[Any] = num_attention_heads
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : Tuple = intermediate_size
UpperCamelCase__ : str = hidden_dropout_prob
UpperCamelCase__ : List[Any] = attention_probs_dropout_prob
UpperCamelCase__ : Optional[Any] = max_position_embeddings
UpperCamelCase__ : str = type_vocab_size
UpperCamelCase__ : Any = initializer_range
UpperCamelCase__ : Union[str, Any] = layer_norm_eps
UpperCamelCase__ : List[str] = share_encoders
UpperCamelCase__ : List[Any] = projection_dim
| 361
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Dict =logging.get_logger(__name__)
class __a ( A__ ):
_lowerCAmelCase : Optional[int] = '''timm_backbone'''
def __init__( self : Dict , SCREAMING_SNAKE_CASE : Dict=None , SCREAMING_SNAKE_CASE : List[Any]=3 , SCREAMING_SNAKE_CASE : List[Any]=True , SCREAMING_SNAKE_CASE : Dict=True , SCREAMING_SNAKE_CASE : Union[str, Any]=None , **SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
super().__init__(**SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Union[str, Any] = backbone
UpperCamelCase__ : Dict = num_channels
UpperCamelCase__ : str = features_only
UpperCamelCase__ : Dict = use_pretrained_backbone
UpperCamelCase__ : Tuple = True
UpperCamelCase__ : List[Any] = out_indices if out_indices is not None else (-1,)
| 196
| 0
|
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_UpperCamelCase = logging.getLogger(__name__)
def lowerCAmelCase__( lowercase : List[str] , lowercase : Optional[Any] ) -> Optional[int]:
if os.path.exists(UpperCamelCase__ ):
if os.path.exists(os.path.join(UpperCamelCase__ , "config.json" ) ) and os.path.isfile(
os.path.join(UpperCamelCase__ , "config.json" ) ):
os.remove(os.path.join(UpperCamelCase__ , "config.json" ) )
if os.path.exists(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ) and os.path.isfile(
os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) ):
os.remove(os.path.join(UpperCamelCase__ , "pytorch_model.bin" ) )
else:
os.makedirs(UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
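# Entropy of a distribution along its last dimension.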
def lowerCAmelCase__( lowercase : Optional[Any] , lowercase : str=False ) -> Any:
__snake_case : str = 2
if unlogit:
__snake_case : List[Any] = torch.pow(UpperCamelCase__ , UpperCamelCase__ )
__snake_case : str = p * torch.log(UpperCamelCase__ )
__snake_case : Union[str, Any] = 0
return -plogp.sum(dim=-1 )
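# Log a per-layer / per-head 2D tensor, formatting floats or ints as appropriate.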
def lowerCAmelCase__( lowercase : Tuple ) -> Union[str, Any]:
logger.info("lv, h >\t" + "\t".join(f"""{x + 1}""" for x in range(len(UpperCamelCase__ ) ) ) )
for row in range(len(UpperCamelCase__ ) ):
if tensor.dtype != torch.long:
logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(f"""layer {row + 1}:\t""" + "\t".join(f"""{x:d}""" for x in tensor[row].cpu().data ) )
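# Forward the model with a head mask that requires grad; accumulate per-head attention entropy and head importance (|d loss / d mask|).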
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : List[Any]=True , lowercase : Any=True , lowercase : str=None , lowercase : Union[str, Any]=False ) -> Any:
__snake_case , __snake_case : List[Any] = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case : List[str] = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
__snake_case : Optional[Any] = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
if head_mask is None:
__snake_case : List[str] = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device )
head_mask.requires_grad_(requires_grad=UpperCamelCase__ )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case : Union[str, Any] = None
__snake_case : Optional[int] = 0.0
__snake_case : List[Any] = 0.0
for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc="Iteration" , disable=args.local_rank not in [-1, 0] ) ):
__snake_case : Dict = tuple(t.to(args.device ) for t in inputs )
((__snake_case ) , ) : List[Any] = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case : Optional[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case , __snake_case , __snake_case : List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(UpperCamelCase__ ):
__snake_case : Any = entropy(attn.detach() , UpperCamelCase__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case : List[str] = 2
__snake_case : Tuple = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
__snake_case : str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("Attention entropies" )
print_ad_tensor(UpperCamelCase__ )
if compute_importance:
logger.info("Head importance scores" )
print_ad_tensor(UpperCamelCase__ )
logger.info("Head ranked by importance scores" )
__snake_case : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__snake_case : Tuple = torch.arange(
head_importance.numel() , device=args.device )
__snake_case : List[str] = head_ranks.view_as(UpperCamelCase__ )
print_ad_tensor(UpperCamelCase__ )
return attn_entropy, head_importance, total_loss
def lowerCAmelCase__( lowercase : str , lowercase : Any , lowercase : Any ) -> Optional[Any]:
__snake_case , __snake_case , __snake_case : Union[str, Any] = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ )
__snake_case : Optional[Any] = 1 / loss # instead of downsteam score use the LM loss
logger.info("Pruning: original score: %f, threshold: %f" , UpperCamelCase__ , original_score * args.masking_threshold )
__snake_case : Optional[Any] = torch.ones_like(UpperCamelCase__ )
__snake_case : Optional[int] = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__snake_case : Any = original_score
while current_score >= original_score * args.masking_threshold:
__snake_case : Optional[int] = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__snake_case : Union[str, Any] = float("Inf" )
__snake_case : List[Any] = head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase__ ) <= num_to_mask:
print("BREAK BY num_to_mask" )
break
# mask heads
__snake_case : Dict = current_heads_to_mask[:num_to_mask]
logger.info("Heads to mask: %s" , str(current_heads_to_mask.tolist() ) )
__snake_case : List[str] = new_head_mask.view(-1 )
__snake_case : Any = 0.0
__snake_case : Tuple = new_head_mask.view_as(UpperCamelCase__ )
__snake_case : Union[str, Any] = new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase__ )
# Compute metric and head importance again
__snake_case , __snake_case , __snake_case : List[Any] = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ )
__snake_case : Optional[int] = 1 / loss
logger.info(
"Masking: current score: %f, remaining heads %d (%.1f percents)" , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("Final head mask" )
print_ad_tensor(UpperCamelCase__ )
np.save(os.path.join(args.output_dir , "head_mask.npy" ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Tuple , lowercase : List[Any] , lowercase : Dict ) -> Optional[int]:
__snake_case : List[str] = datetime.now()
__snake_case , __snake_case , __snake_case : List[Any] = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ )
__snake_case : Tuple = 1 / loss
__snake_case : Optional[int] = datetime.now() - before_time
__snake_case : Any = sum(p.numel() for p in model.parameters() )
__snake_case : int = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__snake_case : List[Any] = [
v,
]
assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase__ )
__snake_case : Optional[int] = sum(p.numel() for p in model.parameters() )
__snake_case : Optional[Any] = datetime.now()
__snake_case , __snake_case , __snake_case : List[str] = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , )
__snake_case : Any = 1 / loss
__snake_case : List[str] = datetime.now() - before_time
logger.info(
"Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)" , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , )
logger.info("Pruning: score with masking: %f score with pruning: %f" , UpperCamelCase__ , UpperCamelCase__ )
logger.info("Pruning: speed ratio (original timing / new timing): %f percents" , original_time / new_time * 100 )
save_model(UpperCamelCase__ , args.output_dir )
def lowerCAmelCase__( ) -> Union[str, Any]:
__snake_case : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The input data dir. Should contain the .tsv files (or other data files) for the task." , )
parser.add_argument(
"--model_name_or_path" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--output_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help="The output directory where the model predictions and checkpoints will be written." , )
# Other parameters
parser.add_argument(
"--config_name" , default="" , type=UpperCamelCase__ , help="Pretrained config name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--tokenizer_name" , default="" , type=UpperCamelCase__ , help="Pretrained tokenizer name or path if not the same as model_name_or_path" , )
parser.add_argument(
"--cache_dir" , default=UpperCamelCase__ , type=UpperCamelCase__ , help="Where do you want to store the pre-trained models downloaded from s3" , )
parser.add_argument(
"--data_subset" , type=UpperCamelCase__ , default=-1 , help="If > 0: limit the data to a subset of data_subset instances." )
parser.add_argument(
"--overwrite_output_dir" , action="store_true" , help="Whether to overwrite data in output directory" )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
parser.add_argument(
"--dont_normalize_importance_by_layer" , action="store_true" , help="Don\'t normalize importance score by layers" )
parser.add_argument(
"--dont_normalize_global_importance" , action="store_true" , help="Don\'t normalize all importance scores between 0 and 1" , )
parser.add_argument(
"--try_masking" , action="store_true" , help="Whether to try to mask head until a threshold of accuracy." )
parser.add_argument(
"--masking_threshold" , default=0.9 , type=UpperCamelCase__ , help="masking threshold in term of metrics (stop masking when metric < threshold * original metric value)." , )
parser.add_argument(
"--masking_amount" , default=0.1 , type=UpperCamelCase__ , help="Amount to heads to masking at each masking step." )
parser.add_argument("--metric_name" , default="acc" , type=UpperCamelCase__ , help="Metric to use for head masking." )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, sequences shorter padded."
) , )
parser.add_argument("--batch_size" , default=1 , type=UpperCamelCase__ , help="Batch size." )
parser.add_argument("--seed" , type=UpperCamelCase__ , default=42 )
parser.add_argument("--local_rank" , type=UpperCamelCase__ , default=-1 , help="local_rank for distributed training on gpus" )
parser.add_argument("--no_cuda" , action="store_true" , help="Whether not to use CUDA when available" )
parser.add_argument("--server_ip" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=UpperCamelCase__ , default="" , help="Can be used for distant debugging." )
__snake_case : int = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__snake_case : Optional[int] = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu" )
__snake_case : str = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__snake_case : List[Any] = torch.device("cuda" , args.local_rank )
__snake_case : List[str] = 1
torch.distributed.init_process_group(backend="nccl" ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__snake_case : Tuple = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__snake_case : Optional[int] = nn.parallel.DistributedDataParallel(
UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ )
elif args.n_gpu > 1:
__snake_case : List[Any] = nn.DataParallel(UpperCamelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(args.output_dir , "run_args.bin" ) )
logger.info("Training/evaluation parameters %s" , UpperCamelCase__ )
# Prepare dataset
__snake_case : Optional[int] = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__snake_case : Tuple = (torch.from_numpy(UpperCamelCase__ ),)
__snake_case : Optional[int] = TensorDataset(*UpperCamelCase__ )
__snake_case : List[str] = RandomSampler(UpperCamelCase__ )
__snake_case : int = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Try head masking (set heads to zero until the score goes under a threshole)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__snake_case : List[str] = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
| 326
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=_A , )
assert hasattr(self , '''env''' )
def _lowercase ( self , _A=1 ):
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _lowercase ( self , _A ):
'''simple docstring'''
TrainingJobAnalytics(_A ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273
| 0
|
def nand_gate(input_a: int, input_b: int) -> int:
    # NAND is 0 only when both inputs are 1
    return int((input_a, input_b).count(0) != 0)
def SCREAMING_SNAKE_CASE ( ) -> None:
assert nand_gate(0 , 0) == 1
assert nand_gate(0 , 1) == 1
assert nand_gate(1 , 0) == 1
assert nand_gate(1 , 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 351
|
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class a__ ( UpperCamelCase__ , unittest.TestCase ):
a : List[Any] = RoFormerTokenizer
a : Tuple = RoFormerTokenizerFast
a : Dict = True
a : Optional[Any] = True
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
super().setUp()
def lowerCAmelCase_ ( self , **A ) -> Tuple:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self , **A ) -> Union[str, Any]:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base" , **A )
def lowerCAmelCase_ ( self ) -> Tuple:
'''simple docstring'''
a = "永和服装饰品有限公司,今天天气非常好"
a = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
return input_text, output_text
def lowerCAmelCase_ ( self ) -> List[Any]:
'''simple docstring'''
a = self.get_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
a = self.get_rust_tokenizer()
a , a = self.get_chinese_input_output_texts()
a = tokenizer.tokenize(A )
self.assertListEqual(A , output_text.split() )
a = tokens + [tokenizer.unk_token]
a = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A )
def lowerCAmelCase_ ( self ) -> Any:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase_ ( self ) -> Optional[int]:
'''simple docstring'''
pass
| 180
| 0
|
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
_UpperCAmelCase : int = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def UpperCAmelCase__ ( lowerCamelCase ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
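# Best score of a prediction against any of its gold answers.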
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
return max(metric_fn(__UpperCamelCase, __UpperCamelCase ) for gt in ground_truths )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :List[str] = [line.strip() for line in open(__UpperCamelCase, "r" ).readlines()]
lowercase :Any = []
if args.gold_data_mode == "qa":
lowercase :Any = pd.read_csv(__UpperCamelCase, sep="\t", header=__UpperCamelCase )
for answer_list in data[1]:
lowercase :List[str] = ast.literal_eval(__UpperCamelCase )
answers.append(__UpperCamelCase )
else:
lowercase :Union[str, Any] = [line.strip() for line in open(__UpperCamelCase, "r" ).readlines()]
lowercase :Tuple = [[reference] for reference in references]
lowercase :Union[str, Any] = 0
for prediction, ground_truths in zip(__UpperCamelCase, __UpperCamelCase ):
total += 1
em += metric_max_over_ground_truths(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
fa += metric_max_over_ground_truths(__UpperCamelCase, __UpperCamelCase, __UpperCamelCase )
lowercase :Union[str, Any] = 100.0 * em / total
lowercase :Optional[int] = 100.0 * fa / total
logger.info(F"F1: {fa:.2f}" )
logger.info(F"EM: {em:.2f}" )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :List[Any] = args.k
lowercase :Tuple = [line.strip() for line in open(__UpperCamelCase, "r" ).readlines()]
lowercase :int = [line.strip() for line in open(__UpperCamelCase, "r" ).readlines()]
lowercase :Optional[int] = 0
for hypo, reference in zip(__UpperCamelCase, __UpperCamelCase ):
lowercase :str = set(hypo.split("\t" )[:k] )
lowercase :Tuple = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
lowercase :str = 100.0 * em / total
logger.info(F"Precision@{k}: {em: .2f}" )
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
def strip_title(lowerCamelCase ):
if title.startswith("\"" ):
lowercase :int = title[1:]
if title.endswith("\"" ):
lowercase :Optional[int] = title[:-1]
return title
lowercase :Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase, return_tensors="pt", padding=__UpperCamelCase, truncation=__UpperCamelCase, )["input_ids"].to(args.device )
lowercase :Dict = rag_model.rag.question_encoder(__UpperCamelCase )
lowercase :int = question_enc_outputs[0]
lowercase :Any = rag_model.retriever(
__UpperCamelCase, question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", )
lowercase :Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
lowercase :Dict = []
for docs in all_docs:
lowercase :Tuple = [strip_title(__UpperCamelCase ) for title in docs["title"]]
provenance_strings.append("\t".join(__UpperCamelCase ) )
return provenance_strings
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
with torch.no_grad():
lowercase :Optional[int] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
__UpperCamelCase, return_tensors="pt", padding=__UpperCamelCase, truncation=__UpperCamelCase )
lowercase :Union[str, Any] = inputs_dict.input_ids.to(args.device )
lowercase :int = inputs_dict.attention_mask.to(args.device )
lowercase :Optional[Any] = rag_model.generate( # rag_model overwrites generate
__UpperCamelCase, attention_mask=__UpperCamelCase, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=__UpperCamelCase, num_return_sequences=1, bad_words_ids=[[0, 0]], )
lowercase :int = rag_model.retriever.generator_tokenizer.batch_decode(__UpperCamelCase, skip_special_tokens=__UpperCamelCase )
if args.print_predictions:
for q, a in zip(__UpperCamelCase, __UpperCamelCase ):
logger.info("Q: {} - A: {}".format(__UpperCamelCase, __UpperCamelCase ) )
return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path", default=None, type=str, help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number",
    )
    parser.add_argument(
        "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true",
    )
    parser.add_argument(
        "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs", action="store_true", help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith("rag"):
        model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
        model_kwargs["n_docs"] = args.n_docs
        if args.index_name is not None:
            model_kwargs["index_name"] = args.index_name
        if args.index_path is not None:
            model_kwargs["index_path"] = args.index_path
    else:
        model_class = BartForConditionalGeneration

    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )

    logger.info("Evaluate the following checkpoints: %s", checkpoints)

    score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval

    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue

        logger.info("***** Running evaluation for {} *****".format(checkpoint))
        logger.info("  Batch size = %d", args.eval_batch_size)
        logger.info("  Predictions will be stored under {}".format(args.predictions_path))

        if args.model_type.startswith("rag"):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)

        with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write("\n".join(answers) + "\n")
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write("\n".join(answers))
                preds_file.flush()

            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
| 236
|
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, Wav2Vec2FeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")


class FeatureExtractorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = Wav2Vec2FeatureExtractor.from_pretrained("hf-internal-testing/tiny-random-wav2vec2")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = Wav2Vec2FeatureExtractor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json"
        )


@is_staging_test
class FeatureExtractorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-feature-extractor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-feature-extractor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-feature-extractor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="test-feature-extractor", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(f"{USER}/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_in_organization(self):
        feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        feature_extractor.push_to_hub("valid_org/test-feature-extractor", use_auth_token=self._token)

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-feature-extractor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-feature-extractor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("valid_org/test-feature-extractor-org")
        for k, v in feature_extractor.__dict__.items():
            self.assertEqual(v, getattr(new_feature_extractor, k))

    def test_push_to_hub_dynamic_feature_extractor(self):
        CustomFeatureExtractor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

        feature_extractor.push_to_hub("test-dynamic-feature-extractor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            feature_extractor.auto_map,
            {"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor"},
        )

        new_feature_extractor = AutoFeatureExtractor.from_pretrained(
            f"{USER}/test-dynamic-feature-extractor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
        self.assertEqual(new_feature_extractor.__class__.__name__, "CustomFeatureExtractor")
| 118
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowerCAmelCase_( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=7 ,__UpperCAmelCase=3 ,__UpperCAmelCase=30 ,__UpperCAmelCase=400 ,__UpperCAmelCase=True ,__UpperCAmelCase=None ,__UpperCAmelCase=True ,__UpperCAmelCase=[0.5, 0.5, 0.5] ,__UpperCAmelCase=[0.5, 0.5, 0.5] ,__UpperCAmelCase=True ,__UpperCAmelCase=1 / 255 ,__UpperCAmelCase=True ,) -> Optional[int]:
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
lowerCAmelCase__ : Tuple = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
lowerCAmelCase__ : int = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Tuple = min_resolution
lowerCAmelCase__ : Optional[Any] = max_resolution
lowerCAmelCase__ : Tuple = do_resize
lowerCAmelCase__ : Dict = size
lowerCAmelCase__ : Dict = do_normalize
lowerCAmelCase__ : List[str] = image_mean
lowerCAmelCase__ : List[Any] = image_std
lowerCAmelCase__ : Any = do_rescale
lowerCAmelCase__ : Optional[int] = rescale_factor
lowerCAmelCase__ : int = do_pad
def UpperCAmelCase_ ( self ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase=False ) -> Optional[Any]:
if not batched:
lowerCAmelCase__ : Tuple = image_inputs[0]
if isinstance(__UpperCAmelCase ,Image.Image ):
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = image.size
else:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = image.shape[1], image.shape[2]
if w < h:
lowerCAmelCase__ : str = int(self.size["""shortest_edge"""] * h / w )
lowerCAmelCase__ : List[str] = self.size["""shortest_edge"""]
elif w > h:
lowerCAmelCase__ : Union[str, Any] = self.size["""shortest_edge"""]
lowerCAmelCase__ : List[Any] = int(self.size["""shortest_edge"""] * w / h )
else:
lowerCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""]
lowerCAmelCase__ : Optional[Any] = self.size["""shortest_edge"""]
else:
lowerCAmelCase__ : Optional[int] = []
for image in image_inputs:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCAmelCase__ : Union[str, Any] = max(__UpperCAmelCase ,key=lambda __UpperCAmelCase : item[0] )[0]
lowerCAmelCase__ : Union[str, Any] = max(__UpperCAmelCase ,key=lambda __UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowercase : str = ConditionalDetrImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : List[Any] = ConditionalDetrImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self ) -> Optional[int]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""image_std""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase ,"""size""" ) )
def UpperCAmelCase_ ( self ) -> str:
lowerCAmelCase__ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=__UpperCAmelCase )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,Image.Image )
# Test not batched input
lowerCAmelCase__ : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
lowerCAmelCase__ : Optional[int] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def UpperCAmelCase_ ( self ) -> Dict:
# Initialize image_processing
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Tuple = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,np.ndarray )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : List[str] = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Any = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def UpperCAmelCase_ ( self ) -> List[str]:
# Initialize image_processing
lowerCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCAmelCase ,torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase ,torch.Tensor )
# Test not batched input
lowerCAmelCase__ : List[str] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
lowerCAmelCase__ : int = image_processing(__UpperCAmelCase ,return_tensors="""pt""" ).pixel_values
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.image_processor_tester.get_expected_values(__UpperCAmelCase ,batched=__UpperCAmelCase )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def UpperCAmelCase_ ( self ) -> Tuple:
# prepare image and target
lowerCAmelCase__ : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" ,"""r""" ) as f:
lowerCAmelCase__ : Tuple = json.loads(f.read() )
lowerCAmelCase__ : Dict = {"""image_id""": 3_9769, """annotations""": target}
# encode them
lowerCAmelCase__ : List[Any] = ConditionalDetrImageProcessor.from_pretrained("""microsoft/conditional-detr-resnet-50""" )
lowerCAmelCase__ : Tuple = image_processing(images=__UpperCAmelCase ,annotations=__UpperCAmelCase ,return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Dict = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : List[str] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,__UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : str = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,__UpperCAmelCase ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : int = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,__UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,__UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ : List[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,__UpperCAmelCase ) )
# verify orig_size
lowerCAmelCase__ : Tuple = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,__UpperCAmelCase ) )
# verify size
lowerCAmelCase__ : List[str] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,__UpperCAmelCase ) )
@slow
def UpperCAmelCase_ ( self ) -> str:
# prepare image, target and masks_path
lowerCAmelCase__ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" ,"""r""" ) as f:
lowerCAmelCase__ : str = json.loads(f.read() )
lowerCAmelCase__ : Union[str, Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
lowerCAmelCase__ : List[Any] = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
lowerCAmelCase__ : Optional[int] = ConditionalDetrImageProcessor(format="""coco_panoptic""" )
lowerCAmelCase__ : Union[str, Any] = image_processing(images=__UpperCAmelCase ,annotations=__UpperCAmelCase ,masks_path=__UpperCAmelCase ,return_tensors="""pt""" )
# verify pixel values
lowerCAmelCase__ : Any = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Dict = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
# verify area
lowerCAmelCase__ : List[str] = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] ,__UpperCAmelCase ) )
# verify boxes
lowerCAmelCase__ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape ,__UpperCAmelCase )
lowerCAmelCase__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] ,__UpperCAmelCase ,atol=1E-3 ) )
# verify image_id
lowerCAmelCase__ : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] ,__UpperCAmelCase ) )
# verify is_crowd
lowerCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] ,__UpperCAmelCase ) )
# verify class_labels
lowerCAmelCase__ : List[Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] ,__UpperCAmelCase ) )
# verify masks
lowerCAmelCase__ : int = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() ,__UpperCAmelCase )
# verify orig_size
lowerCAmelCase__ : Dict = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] ,__UpperCAmelCase ) )
# verify size
lowerCAmelCase__ : str = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] ,__UpperCAmelCase ) )
| 184
|
'''simple docstring'''
from typing import Any
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ) -> Any:
lowerCAmelCase__ : Optional[Any] = data
lowerCAmelCase__ : str = None
def __repr__( self ) -> str:
return F"""Node({self.data})"""
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ) -> List[Any]:
lowerCAmelCase__ : List[str] = None
def __iter__( self ) -> Any:
lowerCAmelCase__ : int = self.head
while node:
yield node.data
lowerCAmelCase__ : Optional[Any] = node.next
def __len__( self ) -> int:
return sum(1 for _ in self )
def __repr__( self ) -> str:
return "->".join([str(__UpperCAmelCase ) for item in self] )
def __getitem__( self ,__UpperCAmelCase ) -> Any:
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
lowerCAmelCase__ : List[Any] = self.head
for _ in range(__UpperCAmelCase ):
lowerCAmelCase__ : List[str] = current.next
lowerCAmelCase__ : Dict = data
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None:
self.insert_nth(len(self ) ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ) -> None:
self.insert_nth(0 ,__UpperCAmelCase )
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
lowerCAmelCase__ : str = Node(__UpperCAmelCase )
if self.head is None:
lowerCAmelCase__ : List[Any] = new_node
elif index == 0:
lowerCAmelCase__ : Dict = self.head # link new_node to head
lowerCAmelCase__ : int = new_node
else:
lowerCAmelCase__ : Any = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : Tuple = temp.next
lowerCAmelCase__ : str = temp.next
lowerCAmelCase__ : Optional[int] = new_node
def UpperCAmelCase_ ( self ) -> None: # print every node data
print(self )
def UpperCAmelCase_ ( self ) -> Any:
return self.delete_nth(0 )
def UpperCAmelCase_ ( self ) -> Any: # delete from tail
return self.delete_nth(len(self ) - 1 )
def UpperCAmelCase_ ( self ,__UpperCAmelCase = 0 ) -> Any:
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
lowerCAmelCase__ : List[Any] = self.head # default first node
if index == 0:
lowerCAmelCase__ : str = self.head.next
else:
lowerCAmelCase__ : int = self.head
for _ in range(index - 1 ):
lowerCAmelCase__ : Union[str, Any] = temp.next
lowerCAmelCase__ : Optional[Any] = temp.next
lowerCAmelCase__ : Any = temp.next.next
return delete_node.data
def UpperCAmelCase_ ( self ) -> bool:
return self.head is None
def UpperCAmelCase_ ( self ) -> None:
lowerCAmelCase__ : Optional[int] = None
lowerCAmelCase__ : str = self.head
while current:
# Store the current node's next node.
lowerCAmelCase__ : Optional[Any] = current.next
# Make the current node's next point backwards
lowerCAmelCase__ : Optional[int] = prev
# Make the previous node be the current node
lowerCAmelCase__ : Optional[Any] = current
# Make the current node the next node (to progress iteration)
lowerCAmelCase__ : Optional[int] = next_node
# Return prev in order to put the head at the end
lowerCAmelCase__ : List[str] = prev
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Dict = LinkedList()
assert linked_list.is_empty() is True
assert str(UpperCamelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(UpperCamelCase ) == i
linked_list.insert_nth(UpperCamelCase , i + 1 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(UpperCamelCase ) == 9
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowerCAmelCase__ : str = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(UpperCamelCase ) == "->".join(str(UpperCamelCase ) for i in range(-8 , 1 ) )
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
lowerCAmelCase__ : Any = [
-9,
100,
Node(77345112 ),
"""dlrow olleH""",
7,
5555,
0,
-192.5_5555,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
lowerCAmelCase__ : List[Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(UpperCamelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(UpperCamelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowerCAmelCase__ : List[str] = linked_list.delete_head()
assert result == -9
assert (
str(UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowerCAmelCase__ : Union[str, Any] = linked_list.delete_tail()
assert result == 12.2
assert (
str(UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowerCAmelCase__ : str = linked_list.delete_nth(10 )
assert result is None
assert (
str(UpperCamelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(UpperCamelCase )
assert (
str(UpperCamelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(UpperCamelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _SCREAMING_SNAKE_CASE ( ):
"""simple docstring"""
from doctest import testmod
testmod()
lowerCAmelCase__ : str = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(UpperCamelCase )
print("""\nReading/changing Node data using indexing:""" )
print(f"""Element at Position 1: {linked_list[1]}""" )
lowerCAmelCase__ : Dict = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(UpperCamelCase )
print(f"""length of linked_list is : {len(UpperCamelCase )}""" )
if __name__ == "__main__":
main()
| 184
| 1
|
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    # Depth-first search over the state space: each level fixes one more element.
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 244
|
import argparse
import os
import torch
from transformers.utils import WEIGHTS_NAME
DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
| 244
| 1
|
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase__ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... ).images\n >>> image[0].save("cat.png")\n ```\n'
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
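# Added note: with the default scale_factor=8 the returned size is ceil(size / 64) * 8, i.e. a
# 768x768 request maps to a 96x96 latent grid, while a non-multiple such as 300 rounds up to 40.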
class A ( UpperCAmelCase_ ):
def __init__(self : Union[str, Any] , __UpperCAmelCase : UNetaDConditionModel , __UpperCAmelCase : DDPMScheduler , __UpperCAmelCase : VQModel , ) -> Any:
"""simple docstring"""
super().__init__()
self.register_modules(
unet=__UpperCAmelCase , scheduler=__UpperCAmelCase , movq=__UpperCAmelCase , )
UpperCAmelCase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def lowercase_ (self : Tuple , __UpperCAmelCase : str , __UpperCAmelCase : Any , __UpperCAmelCase : str , __UpperCAmelCase : str , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if latents is None:
UpperCAmelCase__ = randn_tensor(__UpperCAmelCase , generator=__UpperCAmelCase , device=__UpperCAmelCase , dtype=__UpperCAmelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
UpperCAmelCase__ = latents.to(__UpperCAmelCase )
UpperCAmelCase__ = latents * scheduler.init_noise_sigma
return latents
def lowercase_ (self : Any , __UpperCAmelCase : Any=0 ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
UpperCAmelCase__ = torch.device(f"""cuda:{gpu_id}""" )
UpperCAmelCase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__UpperCAmelCase , __UpperCAmelCase )
def lowercase_ (self : Dict , __UpperCAmelCase : List[str]=0 ) -> int:
"""simple docstring"""
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
UpperCAmelCase__ = torch.device(f"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=__UpperCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
UpperCAmelCase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
UpperCAmelCase__ , UpperCAmelCase__ = cpu_offload_with_hook(__UpperCAmelCase , __UpperCAmelCase , prev_module_hook=__UpperCAmelCase )
# We'll offload the last model manually.
UpperCAmelCase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def lowercase_ (self : Dict ) -> Tuple:
"""simple docstring"""
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__UpperCAmelCase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__UpperCAmelCase )
def __call__(self : Optional[int] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : Union[torch.FloatTensor, List[torch.FloatTensor]] , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 5_1_2 , __UpperCAmelCase : int = 1_0_0 , __UpperCAmelCase : float = 4.0 , __UpperCAmelCase : int = 1 , __UpperCAmelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCAmelCase : Optional[torch.FloatTensor] = None , __UpperCAmelCase : Optional[str] = "pil" , __UpperCAmelCase : bool = True , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase__ = self._execution_device
UpperCAmelCase__ = guidance_scale > 1.0
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = torch.cat(__UpperCAmelCase , dim=0 )
UpperCAmelCase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
UpperCAmelCase__ = torch.cat(__UpperCAmelCase , dim=0 )
if do_classifier_free_guidance:
UpperCAmelCase__ = image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
UpperCAmelCase__ = negative_image_embeds.repeat_interleave(__UpperCAmelCase , dim=0 )
UpperCAmelCase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=__UpperCAmelCase )
self.scheduler.set_timesteps(__UpperCAmelCase , device=__UpperCAmelCase )
UpperCAmelCase__ = self.scheduler.timesteps
UpperCAmelCase__ = self.unet.config.in_channels
UpperCAmelCase__ , UpperCAmelCase__ = downscale_height_and_width(__UpperCAmelCase , __UpperCAmelCase , self.movq_scale_factor )
# create initial latent
UpperCAmelCase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , self.scheduler , )
for i, t in enumerate(self.progress_bar(__UpperCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
UpperCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
UpperCAmelCase__ = {"image_embeds": image_embeds}
UpperCAmelCase__ = self.unet(
sample=__UpperCAmelCase , timestep=__UpperCAmelCase , encoder_hidden_states=__UpperCAmelCase , added_cond_kwargs=__UpperCAmelCase , return_dict=__UpperCAmelCase , )[0]
if do_classifier_free_guidance:
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.chunk(2 )
UpperCAmelCase__ , UpperCAmelCase__ = variance_pred.chunk(2 )
UpperCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
UpperCAmelCase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
UpperCAmelCase__ , UpperCAmelCase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
UpperCAmelCase__ = self.scheduler.step(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , generator=__UpperCAmelCase , )[0]
# post-processing
UpperCAmelCase__ = self.movq.decode(__UpperCAmelCase , force_not_quantize=__UpperCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
UpperCAmelCase__ = image * 0.5 + 0.5
UpperCAmelCase__ = image.clamp(0 , 1 )
UpperCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
UpperCAmelCase__ = self.numpy_to_pil(__UpperCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__UpperCAmelCase )
| 143
|
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class A :
def __init__(self : Tuple , __UpperCAmelCase : str , ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = parent
UpperCAmelCase__ = 1_3
UpperCAmelCase__ = 7
UpperCAmelCase__ = True
UpperCAmelCase__ = True
UpperCAmelCase__ = False
UpperCAmelCase__ = True
UpperCAmelCase__ = 9_9
UpperCAmelCase__ = 3_2
UpperCAmelCase__ = 2
UpperCAmelCase__ = 4
UpperCAmelCase__ = 3_7
UpperCAmelCase__ = "gelu"
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 0.1
UpperCAmelCase__ = 5_1_2
UpperCAmelCase__ = 1_6
UpperCAmelCase__ = 2
UpperCAmelCase__ = 0.02
UpperCAmelCase__ = 3
UpperCAmelCase__ = 4
UpperCAmelCase__ = None
def lowercase_ (self : int ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase__ = None
if self.use_input_mask:
UpperCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase__ = None
UpperCAmelCase__ = None
UpperCAmelCase__ = None
if self.use_labels:
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ (self : int , __UpperCAmelCase : int , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
UpperCAmelCase__ = [input_ids, input_mask]
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ (self : str , __UpperCAmelCase : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Dict ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForMaskedLM(config=__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ (self : int , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertForQuestionAnswering(config=__UpperCAmelCase )
UpperCAmelCase__ = {
"input_ids": input_ids,
"attention_mask": input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Dict , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForSequenceClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ (self : Optional[int] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[Any] , __UpperCAmelCase : str , __UpperCAmelCase : Any ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.num_choices
UpperCAmelCase__ = TFDistilBertForMultipleChoice(__UpperCAmelCase )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = tf.tile(tf.expand_dims(__UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
UpperCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase_ (self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] , __UpperCAmelCase : Any , __UpperCAmelCase : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.num_labels
UpperCAmelCase__ = TFDistilBertForTokenClassification(__UpperCAmelCase )
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
UpperCAmelCase__ = model(__UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ (self : Any ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) = config_and_inputs
UpperCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__UpperCAmelCase : Union[str, Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
__UpperCAmelCase : Optional[int] = (
{
'feature-extraction': TFDistilBertModel,
'fill-mask': TFDistilBertForMaskedLM,
'question-answering': TFDistilBertForQuestionAnswering,
'text-classification': TFDistilBertForSequenceClassification,
'token-classification': TFDistilBertForTokenClassification,
'zero-shot': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : str = False
def lowercase_ (self : Union[str, Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModelTester(self )
UpperCAmelCase__ = ConfigTester(self , config_class=__UpperCAmelCase , dim=3_7 )
def lowercase_ (self : Any ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ (self : int ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCAmelCase )
def lowercase_ (self : Optional[int] ) -> int:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCAmelCase )
def lowercase_ (self : Any ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCAmelCase )
def lowercase_ (self : List[str] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCAmelCase )
def lowercase_ (self : Dict ) -> List[str]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCAmelCase )
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCAmelCase )
@slow
def lowercase_ (self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
UpperCAmelCase__ = TFDistilBertModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
@require_tf
class A ( unittest.TestCase ):
@slow
def lowercase_ (self : List[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase__ = TFDistilBertModel.from_pretrained("distilbert-base-uncased" )
UpperCAmelCase__ = tf.constant([[0, 1, 2, 3, 4, 5]] )
UpperCAmelCase__ = model(__UpperCAmelCase )[0]
UpperCAmelCase__ = [1, 6, 7_6_8]
self.assertEqual(output.shape , __UpperCAmelCase )
UpperCAmelCase__ = tf.constant(
[
[
[0.19261885, -0.13732955, 0.4119799],
[0.22150156, -0.07422661, 0.39037204],
[0.22756018, -0.0896414, 0.3701467],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCAmelCase , atol=1E-4 )
| 143
| 1
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
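# Illustrative sketch only (added for clarity, not part of the metric): the description above
# defines sentence-level GLEU as min(n-gram recall, n-gram precision) over 1- to 4-grams.
# The helper name `_gleu_sketch` is hypothetical; the metric class below delegates the actual
# corpus-level computation to nltk's `gleu_score.corpus_gleu`.
def _gleu_sketch(hypothesis: list, reference: list, min_len: int = 1, max_len: int = 4) -> float:
    from collections import Counter

    def ngrams(tokens, n):
        return Counter(tuple(tokens[i : i + n]) for i in range(len(tokens) - n + 1))

    hyp_counts = Counter()
    ref_counts = Counter()
    for n in range(min_len, max_len + 1):
        hyp_counts += ngrams(hypothesis, n)
        ref_counts += ngrams(reference, n)

    # Matching n-grams are clipped by their count in the other sequence (Counter intersection).
    matches = sum((hyp_counts & ref_counts).values())
    precision = matches / max(sum(hyp_counts.values()), 1)
    recall = matches / max(sum(ref_counts.values()), 1)
    return min(precision, recall)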
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A (datasets.Metric ):
'''simple docstring'''
def a_ ( self : List[str] ) -> MetricInfo:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
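    # Inputs mirror the Examples in the docstring above: `predictions` is a list of
    # tokenized hypotheses (each a list of string tokens) and `references` is a list
    # of reference sets, one list of tokenized references per hypothesis.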
    def a_ ( self : Any , predictions , references , min_len : int = 1 , max_len : int = 4 , ) -> Dict[str, float]:
        """simple docstring"""
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
| 274
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __a :
@staticmethod
def SCREAMING_SNAKE_CASE__ ( *lowerCAmelCase__ , **lowerCAmelCase__ ) -> List[str]:
'''simple docstring'''
pass
def snake_case_ ( snake_case ) -> Optional[Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__lowerCAmelCase = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class __a ( unittest.TestCase ):
__lowercase : Dict = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def SCREAMING_SNAKE_CASE__ ( self , model , tokenizer , processor ) -> int:
        '''simple docstring'''
        dqa_pipeline = pipeline(
            'document-question-answering' , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , '' ) ) )
        question = 'What is the placebo?'
        examples = [
            {
                'image': load_image(image ),
                'question': question,
            },
            {
                'image': image,
                'question': question,
            },
            {
                'image': image,
                'question': question,
                'word_boxes': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def SCREAMING_SNAKE_CASE__ ( self , dqa_pipeline , examples ) -> int:
        '''simple docstring'''
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                    {'score': ANY(float ), 'answer': ANY(str ), 'start': ANY(int ), 'end': ANY(int )},
                ]
            ]
            * 3 , )
    @require_torch
    @require_detectron2
    @require_pytesseract
    def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
        '''simple docstring'''
        dqa_pipeline = pipeline('document-question-answering' , model='hf-internal-testing/tiny-random-layoutlmv2' )
        image = INVOICE_URL
        question = 'How many cats are there?'
        expected_output = [
            {'score': 0.0001, 'answer': 'oy 2312/2019', 'start': 38, 'end': 39},
            {'score': 0.0001, 'answer': 'oy 2312/2019 DUE', 'start': 38, 'end': 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = './tests/fixtures/tests_samples/COCO/000000039769.png'
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
lowercase__: List[str] = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , )
lowercase__: int = INVOICE_URL
lowercase__: str = 'What is the invoice number?'
lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Any = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Optional[int] = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 , )
@slow
@require_torch
    @require_detectron2
@require_pytesseract
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Any = pipeline(
'document-question-answering' , model='tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa' , revision='9977165' , max_seq_len=50 , )
lowercase__: Optional[int] = INVOICE_URL
lowercase__: Union[str, Any] = 'What is the invoice number?'
lowercase__: Optional[Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Tuple = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Dict = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
lowercase__: Optional[Any] = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ )
lowercase__: Optional[Any] = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , )
lowercase__: List[str] = INVOICE_URL
lowercase__: Union[str, Any] = 'What is the invoice number?'
lowercase__: Dict = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowercase__: List[str] = dqa_pipeline({'image': image, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
lowercase__: int = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 , )
lowercase__: Any = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) )
# This model should also work if `image` is set to None
lowercase__: List[Any] = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
lowercase__: Any = AutoTokenizer.from_pretrained(
'impira/layoutlm-document-qa' , revision='3dc6de3' , add_prefix_space=lowerCAmelCase__ )
lowercase__: str = pipeline(
'document-question-answering' , model='impira/layoutlm-document-qa' , tokenizer=lowerCAmelCase__ , revision='3dc6de3' , max_seq_len=50 , )
lowercase__: Optional[Any] = INVOICE_URL
lowercase__: Optional[Any] = 'What is the invoice number?'
lowercase__: Optional[int] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
lowercase__: Any = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 , )
lowercase__: Optional[int] = list(zip(*apply_tesseract(load_image(lowerCAmelCase__ ) , lowerCAmelCase__ , '' ) ) )
# This model should also work if `image` is set to None
lowercase__: Tuple = dqa_pipeline({'image': None, 'word_boxes': word_boxes, 'question': question} , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase__ , decimals=4 ) , [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] , )
@slow
@require_torch
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__: List[Any] = pipeline(
'document-question-answering' , model='naver-clova-ix/donut-base-finetuned-docvqa' , tokenizer=AutoTokenizer.from_pretrained('naver-clova-ix/donut-base-finetuned-docvqa' ) , feature_extractor='naver-clova-ix/donut-base-finetuned-docvqa' , )
lowercase__: int = INVOICE_URL
lowercase__: int = 'What is the invoice number?'
lowercase__: Union[str, Any] = dqa_pipeline(image=lowerCAmelCase__ , question=lowerCAmelCase__ , top_k=2 )
self.assertEqual(nested_simplify(lowerCAmelCase__ , decimals=4 ) , [{'answer': 'us-001'}] )
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
pass
| 196
| 0
|
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__A : int = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
__A : Any = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
__A : Union[str, Any] = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def SCREAMING_SNAKE_CASE__ (self : Any):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Value("string" , id="sequence"),
}) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def SCREAMING_SNAKE_CASE__ (self : Any , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 355
|
"""simple docstring"""
def solution( n = 600_851_475_143 ):
    """simple docstring"""
    try:
        n = int(n )
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int." )
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one." )
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans )
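# For example, 13195 = 5 * 7 * 13 * 29, so solution(13195) returns 29; the default
# argument reproduces the Project Euler problem 3 input.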
if __name__ == "__main__":
print(f"""{solution() = }""")
| 57
| 0
|
'''simple docstring'''
from functools import reduce
lowerCAmelCase_ : Optional[Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
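# Project Euler problem 8: find the thirteen adjacent digits in the 1000-digit number
# above that have the greatest product. The solution below slides a 13-character window
# over the string and multiplies its digits together with reduce().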
def solution( n : str = N ) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x , y : str(int(x ) * int(y ) ) , n[i : i + 13] ) )
        for i in range(len(n ) - 12 ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63
|
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def snake_case ( snake_case__ :Union[str, Any] , snake_case__ :Dict) -> Any:
_A = []
for part_id in partition_order:
_A = df.where(F'''SPARK_PARTITION_ID() = {part_id}''').collect()
for row_idx, row in enumerate(snake_case__):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> Optional[Any]:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(100).repartition(1)
_A = Spark(snake_case__)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> Union[str, Any]:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(10).repartition(2)
_A = [1, 0]
_A = _generate_iterable_examples(snake_case__ , snake_case__) # Reverse the partitions.
_A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__)
for i, (row_id, row_dict) in enumerate(generate_fn()):
_A , _A = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> int:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(10).repartition(1)
_A = SparkExamplesIterable(snake_case__)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case__):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> Union[str, Any]:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""") as generator_mock:
_A = lambda snake_case__: x.reverse()
_A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0])
_A = SparkExamplesIterable(snake_case__).shuffle_data_sources(snake_case__)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case__):
_A , _A = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> List[str]:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(20).repartition(4)
# Partitions 0 and 2
_A = SparkExamplesIterable(snake_case__).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
_A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2])
for i, (row_id, row_dict) in enumerate(snake_case__):
_A , _A = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_A = SparkExamplesIterable(snake_case__).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
_A = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3])
for i, (row_id, row_dict) in enumerate(snake_case__):
_A , _A = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def snake_case ( ) -> Tuple:
_A = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
_A = spark.range(100).repartition(1)
_A = Spark(snake_case__)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 180
| 0
|
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
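# For example, 44 -> 32 -> 13 -> 10 -> 1 settles into the 1-chain, while
# 85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 falls into the 89-loop.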
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True
CHAINS[57] = False
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    # Appending zeros does not change the digit-square sum, so the same result is
    # cached for number, number * 10, number * 100, ... while they stay in range.
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 10000000 ):
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    # CHAINS[k] is True when k + 1 eventually reaches 1 and False when it reaches 89;
    # the problem asks how many starting numbers below ten million arrive at 89.
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'{solution() = }')
| 336
|
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336
| 1
|
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
A : int = TypeVar("T")
class _lowercase ( Generic[T]):
"""simple docstring"""
def __init__( self : Dict , __lowerCamelCase : bool = True ):
'''simple docstring'''
lowerCamelCase__ : dict[T, list[T]] = {} # dictionary of lists
lowerCamelCase__ : int = directed
def lowerCAmelCase ( self : List[Any] , __lowerCamelCase : T , __lowerCamelCase : T ):
'''simple docstring'''
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowerCamelCase )
self.adj_list[destination_vertex].append(__lowerCamelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowerCamelCase )
lowerCamelCase__ : int = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(__lowerCamelCase )
lowerCamelCase__ : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
lowerCamelCase__ : Optional[int] = [destination_vertex]
lowerCamelCase__ : List[Any] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowerCamelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(__lowerCamelCase )
lowerCamelCase__ : Union[str, Any] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowerCamelCase__ : List[str] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowerCamelCase__ : Optional[int] = [destination_vertex]
lowerCamelCase__ : Dict = []
return self
def __repr__( self : Tuple ):
'''simple docstring'''
return pformat(self.adj_list )
| 184
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A : Union[str, Any] = logging.get_logger(__name__)
A : Optional[Any] = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class _lowercase ( lowercase__):
"""simple docstring"""
A__ = "xlm-roberta"
def __init__( self : Union[str, Any] , __lowerCamelCase : Optional[Any]=30522 , __lowerCamelCase : List[Any]=768 , __lowerCamelCase : Union[str, Any]=12 , __lowerCamelCase : int=12 , __lowerCamelCase : Dict=3072 , __lowerCamelCase : Optional[int]="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : Tuple=0.1 , __lowerCamelCase : str=512 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : Dict=0.0_2 , __lowerCamelCase : List[str]=1E-1_2 , __lowerCamelCase : Optional[int]=1 , __lowerCamelCase : Union[str, Any]=0 , __lowerCamelCase : Optional[int]=2 , __lowerCamelCase : Dict="absolute" , __lowerCamelCase : Optional[Any]=True , __lowerCamelCase : Tuple=None , **__lowerCamelCase : int , ):
'''simple docstring'''
super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , eos_token_id=__lowerCamelCase , **__lowerCamelCase )
lowerCamelCase__ : str = vocab_size
lowerCamelCase__ : Any = hidden_size
lowerCamelCase__ : Optional[Any] = num_hidden_layers
lowerCamelCase__ : Union[str, Any] = num_attention_heads
lowerCamelCase__ : Any = hidden_act
lowerCamelCase__ : str = intermediate_size
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : Any = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = max_position_embeddings
lowerCamelCase__ : List[str] = type_vocab_size
lowerCamelCase__ : Tuple = initializer_range
lowerCamelCase__ : Tuple = layer_norm_eps
lowerCamelCase__ : Any = position_embedding_type
lowerCamelCase__ : Any = use_cache
lowerCamelCase__ : Any = classifier_dropout
class _lowercase ( lowercase__):
"""simple docstring"""
@property
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCamelCase__ : List[str] = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowerCamelCase__ : Any = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 184
| 1
|
from __future__ import annotations
def A(__a: str , __a: list[str] | None = None ):
lowerCAmelCase_ = word_bank or []
# create a table
lowerCAmelCase_ = len(__a ) + 1
lowerCAmelCase_ = []
for _ in range(__a ):
table.append([] )
# seed value
lowerCAmelCase_ = [[]] # because empty string has empty combination
# iterate through the indices
for i in range(__a ):
# condition
if table[i] != []:
for word in word_bank:
# slice condition
if target[i : i + len(__a )] == word:
lowerCAmelCase_ = [
[word, *way] for way in table[i]
]
# adds the word to every combination the current position holds
# now,push that combination to the table[i+len(word)]
table[i + len(__a )] += new_combinations
# combinations are in reverse order so reverse for better output
for combination in table[len(__a )]:
combination.reverse()
return table[len(__a )]
if __name__ == "__main__":
print(all_construct('''jwajalapa''', ['''jwa''', '''j''', '''w''', '''a''', '''la''', '''lapa''']))
print(all_construct('''rajamati''', ['''s''', '''raj''', '''amat''', '''raja''', '''ma''', '''i''', '''t''']))
print(
all_construct(
'''hexagonosaurus''',
['''h''', '''ex''', '''hex''', '''ag''', '''ago''', '''ru''', '''auru''', '''rus''', '''go''', '''no''', '''o''', '''s'''],
)
)
| 22
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCamelCase__ = {
'''configuration_encodec''': [
'''ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EncodecConfig''',
],
'''feature_extraction_encodec''': ['''EncodecFeatureExtractor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'''ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EncodecModel''',
'''EncodecPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 22
| 1
|
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __snake_case ( unittest.TestCase ):
def __a ( self ) -> Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ , snake_case__ : List[str] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : List[str] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : Optional[Any] = controlnet_params
snake_case__ : Any = 'bird'
snake_case__ : Any = jax.device_count()
snake_case__ : Any = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
snake_case__ : Optional[int] = pipe.prepare_image_inputs([canny_image] * num_samples )
snake_case__ : Any = jax.random.PRNGKey(0 )
snake_case__ : Dict = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : Any = replicate(__UpperCamelCase )
snake_case__ : Union[str, Any] = shard(__UpperCamelCase )
snake_case__ : Any = shard(__UpperCamelCase )
snake_case__ : Dict = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : Any = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : str = jnp.array(
[0.1_6_7_9_6_9, 0.1_1_6_6_9_9, 0.0_8_1_5_4_3, 0.1_5_4_2_9_7, 0.1_3_2_8_1_2, 0.1_0_8_8_8_7, 0.1_6_9_9_2_2, 0.1_6_9_9_2_2, 0.2_0_5_0_7_8] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def __a ( self ) -> Tuple:
'''simple docstring'''
snake_case__ , snake_case__ : Union[str, Any] = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose' , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ , snake_case__ : Optional[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' , controlnet=__UpperCamelCase , from_pt=__UpperCamelCase , dtype=jnp.bfloataa )
snake_case__ : List[str] = controlnet_params
snake_case__ : Optional[Any] = 'Chef in the kitchen'
snake_case__ : List[Any] = jax.device_count()
snake_case__ : int = pipe.prepare_text_inputs([prompts] * num_samples )
snake_case__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
snake_case__ : int = pipe.prepare_image_inputs([pose_image] * num_samples )
snake_case__ : Optional[Any] = jax.random.PRNGKey(0 )
snake_case__ : Any = jax.random.split(__UpperCamelCase , jax.device_count() )
snake_case__ : List[Any] = replicate(__UpperCamelCase )
snake_case__ : List[str] = shard(__UpperCamelCase )
snake_case__ : Optional[int] = shard(__UpperCamelCase )
snake_case__ : Any = pipe(
prompt_ids=__UpperCamelCase , image=__UpperCamelCase , params=__UpperCamelCase , prng_seed=__UpperCamelCase , num_inference_steps=50 , jit=__UpperCamelCase , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
snake_case__ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
snake_case__ : Optional[int] = images[0, 253:256, 253:256, -1]
snake_case__ : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
snake_case__ : Any = jnp.array(
[[0.2_7_1_4_8_4, 0.2_6_1_7_1_9, 0.2_7_5_3_9_1, 0.2_7_7_3_4_4, 0.2_7_9_2_9_7, 0.2_9_1_0_1_6, 0.2_9_4_9_2_2, 0.3_0_2_7_3_4, 0.3_0_2_7_3_4]] )
print(F"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
| 143
|
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase__ : str = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase__ : Optional[int] = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase__ : Optional[Any] = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase__ : List[Any] = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase__ : Any = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase__ : Dict = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase__ : Optional[int] = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def UpperCamelCase__ ( ) -> Any:
snake_case__ , snake_case__ : List[str] = randrange(len(A__ ) ), randrange(len(A__ ) )
snake_case__ : str = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
snake_case__ , snake_case__ : Any = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def UpperCamelCase__ ( A__ = 100 ) -> Optional[int]:
return (generate_random_hand() for _ in range(A__ ))
@pytest.mark.parametrize('hand, expected' , A__ )
def UpperCamelCase__ ( A__ , A__ ) -> Union[str, Any]:
assert PokerHand(A__ )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def UpperCamelCase__ ( A__ , A__ ) -> Any:
assert PokerHand(A__ )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , A__ )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Dict:
snake_case__ : Optional[int] = PokerHand(A__ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , A__ )
def UpperCamelCase__ ( A__ , A__ ) -> str:
assert PokerHand(A__ )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , A__ )
def UpperCamelCase__ ( A__ , A__ ) -> Optional[Any]:
assert PokerHand(A__ )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , A__ )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> int:
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def UpperCamelCase__ ( A__ , A__ , A__ ) -> Union[str, Any]:
assert PokerHand(A__ ).compare_with(PokerHand(A__ ) ) == expected
def UpperCamelCase__ ( ) -> Union[str, Any]:
snake_case__ : Union[str, Any] = [PokerHand(A__ ) for hand in SORTED_HANDS]
snake_case__ : Optional[Any] = poker_hands.copy()
shuffle(A__ )
snake_case__ : Tuple = chain(sorted(A__ ) )
for index, hand in enumerate(A__ ):
assert hand == poker_hands[index]
def UpperCamelCase__ ( ) -> str:
# Test that five high straights are compared correctly.
snake_case__ : int = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=A__ )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def UpperCamelCase__ ( ) -> Union[str, Any]:
# Multiple calls to five_high_straight function should still return True
# and shouldn't mutate the list in every call other than the first.
snake_case__ : Optional[int] = PokerHand('2C 4S AS 3D 5C' )
snake_case__ : Optional[int] = True
snake_case__ : Tuple = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def UpperCamelCase__ ( ) -> List[str]:
# Problem number 54 from Project Euler
# Testing from poker_hands.txt file
snake_case__ : Any = 0
snake_case__ : Optional[Any] = os.path.abspath(os.path.dirname(A__ ) )
snake_case__ : List[str] = os.path.join(A__ , 'poker_hands.txt' )
with open(A__ ) as file_hand:
for line in file_hand:
snake_case__ : Tuple = line[:14].strip()
snake_case__ : List[str] = line[15:].strip()
snake_case__ , snake_case__ : Any = PokerHand(A__ ), PokerHand(A__ )
snake_case__ : Tuple = player.compare_with(A__ )
if output == "Win":
answer += 1
assert answer == 376
| 143
| 1
|
'''simple docstring'''
__snake_case : List[Any] = 256
# Modulus to hash a string
__snake_case : Tuple = 1_000_003
def _UpperCAmelCase ( _UpperCamelCase : str, _UpperCamelCase : str ) -> bool:
A_ = len(_UpperCamelCase )
A_ = len(_UpperCamelCase )
if p_len > t_len:
return False
A_ = 0
A_ = 0
A_ = 1
# Calculating the hash of pattern and substring of text
for i in range(_UpperCamelCase ):
A_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
A_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
A_ = (modulus_power * alphabet_size) % modulus
for i in range(0, t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
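        # The window hash is updated in O(1): remove the leading character's contribution
        # (ord(text[i]) * modulus_power, where modulus_power is alphabet_size ** (p_len - 1)
        # modulo `modulus`), shift the remaining window up one place value by multiplying
        # with alphabet_size, then add the incoming character ord(text[i + p_len]).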
A_ = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def _UpperCAmelCase ( ) -> None:
A_ = '''abc1abc12'''
A_ = '''alskfjaldsabc1abc1abc12k23adsfabcabc'''
A_ = '''alskfjaldsk23adsfabcabc'''
assert rabin_karp(_UpperCamelCase, _UpperCamelCase ) and not rabin_karp(_UpperCamelCase, _UpperCamelCase )
# Test 2)
A_ = '''ABABX'''
A_ = '''ABABZABABYABABX'''
assert rabin_karp(_UpperCamelCase, _UpperCamelCase )
# Test 3)
A_ = '''AAAB'''
A_ = '''ABAAAAAB'''
assert rabin_karp(_UpperCamelCase, _UpperCamelCase )
# Test 4)
A_ = '''abcdabcy'''
A_ = '''abcxabcdabxabcdabcdabcy'''
assert rabin_karp(_UpperCamelCase, _UpperCamelCase )
# Test 5)
A_ = '''Lü'''
A_ = '''Lüsai'''
assert rabin_karp(_UpperCamelCase, _UpperCamelCase )
A_ = '''Lue'''
assert not rabin_karp(_UpperCamelCase, _UpperCamelCase )
print('''Success.''' )
if __name__ == "__main__":
test_rabin_karp()
| 18
|
'''simple docstring'''
from collections import defaultdict
def dfs( start: int ) -> int:
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v )
    if ret % 2 == 0:
        cuts.append(start )
    return ret
def even_tree() -> None:
    dfs(1 )
if __name__ == "__main__":
    n_nodes , n_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
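    # Removing the edges (1, 3) and (1, 6) leaves every resulting component with an even
    # number of vertices; dfs also flags the root, hence the final `len(cuts) - 1`, and
    # the script prints 2.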
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
| 18
| 1
|
'''simple docstring'''
from statistics import mean
import numpy as np
def _lowerCAmelCase ( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : int ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =0
# Number of processes finished
_SCREAMING_SNAKE_CASE =0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
_SCREAMING_SNAKE_CASE =[0] * no_of_process
# List to include calculation results
_SCREAMING_SNAKE_CASE =[0] * no_of_process
# Sort by arrival time.
_SCREAMING_SNAKE_CASE =[burst_time[i] for i in np.argsort(_UpperCamelCase )]
_SCREAMING_SNAKE_CASE =[process_name[i] for i in np.argsort(_UpperCamelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
_SCREAMING_SNAKE_CASE =0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
_SCREAMING_SNAKE_CASE =arrival_time[i]
_SCREAMING_SNAKE_CASE =0
# Index showing the location of the process being performed
_SCREAMING_SNAKE_CASE =0
# Saves the current response ratio.
_SCREAMING_SNAKE_CASE =0
for i in range(0 , _UpperCamelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
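                # Highest Response Ratio Next: response ratio = (waiting time + burst time) / burst time,
                # where waiting time = current_time - arrival_time[i]; the ready process with
                # the largest ratio is selected to run next.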
_SCREAMING_SNAKE_CASE =(burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
_SCREAMING_SNAKE_CASE =temp
_SCREAMING_SNAKE_CASE =i
# Calculate the turn around time
_SCREAMING_SNAKE_CASE =current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
_SCREAMING_SNAKE_CASE =1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def _lowerCAmelCase ( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : int ) -> list:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =[0] * no_of_process
for i in range(0 , _UpperCamelCase ):
_SCREAMING_SNAKE_CASE =turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
lowerCamelCase : Dict = 5
lowerCamelCase : Optional[int] = ["A", "B", "C", "D", "E"]
lowerCamelCase : Tuple = [1, 2, 3, 4, 5]
lowerCamelCase : str = [1, 2, 3, 4, 5]
lowerCamelCase : Tuple = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
lowerCamelCase : Optional[Any] = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
| 47
|
"""simple docstring"""
import sys
from collections import defaultdict
class _UpperCamelCase :
'''simple docstring'''
def __init__( self ):
__lowerCAmelCase = []
def snake_case ( self , __a ):
return self.node_position[vertex]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = pos
def snake_case ( self , __a , __a , __a , __a ):
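        # Sift the entry at index `start` down the heap: repeatedly swap it with its smaller
        # child until the min-heap property is restored, keeping the auxiliary `positions`
        # index in sync with every swap.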
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCAmelCase = 2 * start + 1
else:
__lowerCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCAmelCase , __lowerCAmelCase = heap[smallest_child], positions[smallest_child]
__lowerCAmelCase , __lowerCAmelCase = (
heap[start],
positions[start],
)
__lowerCAmelCase , __lowerCAmelCase = temp, tempa
__lowerCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __a )
self.top_to_bottom(__a , __a , __a , __a )
def snake_case ( self , __a , __a , __a , __a ):
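        # Sift the given value upwards from `index` towards the root, shifting larger
        # ancestors down and updating the stored positions as entries move.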
__lowerCAmelCase = position[index]
while index != 0:
__lowerCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCAmelCase = heap[parent]
__lowerCAmelCase = position[parent]
self.set_position(position[parent] , __a )
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , __a )
break
__lowerCAmelCase = parent
else:
__lowerCAmelCase = val
__lowerCAmelCase = temp
self.set_position(__a , 0 )
def snake_case ( self , __a , __a ):
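        # Build a min-heap in place by sifting down every internal node, starting from the
        # last parent and working back to the root.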
__lowerCAmelCase = len(__a ) // 2 - 1
for i in range(__a , -1 , -1 ):
self.top_to_bottom(__a , __a , len(__a ) , __a )
def snake_case ( self , __a , __a ):
__lowerCAmelCase = positions[0]
__lowerCAmelCase = sys.maxsize
self.top_to_bottom(__a , 0 , len(__a ) , __a )
return temp
def _lowerCamelCase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = Heap()
__lowerCAmelCase = [0] * len(_UpperCamelCase )
__lowerCAmelCase = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
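    # Prim's algorithm: repeatedly extract the vertex with the smallest key from the
    # min-heap, record the edge connecting it to the partial tree, and lower the keys of
    # its still-unvisited neighbours whenever a cheaper connecting edge is found.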
__lowerCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCAmelCase = []
for vertex in range(len(_UpperCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_UpperCamelCase )
heap.node_position.append(_UpperCamelCase )
__lowerCAmelCase = []
__lowerCAmelCase = 1
__lowerCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCAmelCase = 0
__lowerCAmelCase = distance
heap.heapify(_UpperCamelCase , _UpperCamelCase )
for _ in range(1 , len(_UpperCamelCase ) ):
__lowerCAmelCase = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_UpperCamelCase )]
):
__lowerCAmelCase = distance
heap.bottom_to_top(
_UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
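
# Self-contained sketch (illustrative only, names are hypothetical) of Prim's algorithm on a
# fixed graph using the standard-library heapq, producing the same kind of edge list the
# interactive block above prints.
import heapq
from collections import defaultdict


def prim_mst_edges(adj: dict, start: int = 0) -> list:
    visited = {start}
    # Heap of (weight, parent, child) candidate edges leaving the visited set.
    candidates = [(w, start, v) for v, w in adj[start]]
    heapq.heapify(candidates)
    mst = []
    while candidates and len(visited) < len(adj):
        weight, parent, vertex = heapq.heappop(candidates)
        if vertex in visited:
            continue
        visited.add(vertex)
        mst.append((parent, vertex))
        for neighbor, w in adj[vertex]:
            if neighbor not in visited:
                heapq.heappush(candidates, (w, vertex, neighbor))
    return mst


_demo_graph = defaultdict(list)
for _u, _v, _w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (1, 3, 7), (2, 3, 3)]:
    _demo_graph[_u].append((_v, _w))
    _demo_graph[_v].append((_u, _w))
print(prim_mst_edges(_demo_graph))  # -> [(0, 1), (1, 2), (2, 3)]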
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowercase_ = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
_UpperCamelCase : Any = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
_UpperCamelCase : Optional[Any] = field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
_UpperCamelCase : Tuple = field(
default=_snake_case , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any:
"""simple docstring"""
lowercase__ = self.task_name.lower()
class SCREAMING_SNAKE_CASE (_snake_case ):
_UpperCamelCase : Any = 'train'
_UpperCamelCase : int = 'dev'
_UpperCamelCase : Dict = 'test'
class SCREAMING_SNAKE_CASE (_snake_case ):
_UpperCamelCase : str = 42
_UpperCamelCase : Tuple = 42
_UpperCamelCase : List[str] = 42
def __init__( self : int , a : Optional[int] , a : int , a : List[str] = None , a : Union[str, Any] = Split.train , a : Any = None , )-> List[Any]:
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCamelCase__ , )
lowercase__ = args
lowercase__ = glue_processors[args.task_name]()
lowercase__ = glue_output_modes[args.task_name]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
try:
lowercase__ = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name' )
# Load data features from cache or dataset file
lowercase__ = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
lowercase__ = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase__ , lowercase__ = label_list[2], label_list[1]
lowercase__ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase__ = cached_features_file + '.lock'
with FileLock(UpperCamelCase__ ):
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
lowercase__ = time.time()
lowercase__ = torch.load(UpperCamelCase__ )
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""" )
if mode == Split.dev:
lowercase__ = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowercase__ = self.processor.get_test_examples(args.data_dir )
else:
lowercase__ = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowercase__ = examples[:limit_length]
lowercase__ = glue_convert_examples_to_features(
UpperCamelCase__ , UpperCamelCase__ , max_length=args.max_seq_length , label_list=UpperCamelCase__ , output_mode=self.output_mode , )
lowercase__ = time.time()
torch.save(self.features , UpperCamelCase__ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : List[str] )-> str:
"""simple docstring"""
return len(self.features )
def __getitem__( self : List[Any] , a : int )-> InputFeatures:
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
return self.label_list
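
# Minimal sketch (illustrative, not part of the dataset class above) of the FileLock pattern it
# uses: only the first process to acquire the lock builds the cached features, the others load them.
import os
import tempfile

import torch
from filelock import FileLock

_cache_path = os.path.join(tempfile.gettempdir(), "demo_features.pt")
with FileLock(_cache_path + ".lock"):
    if os.path.exists(_cache_path):
        demo_features = torch.load(_cache_path)
    else:
        demo_features = list(range(10))  # stand-in for the expensive feature conversion
        torch.save(demo_features, _cache_path)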
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
if is_torch_version('<' , '2.0.0' ) or not hasattr(_SCREAMING_SNAKE_CASE , '_dynamo' ):
return False
return isinstance(_SCREAMING_SNAKE_CASE , torch._dynamo.eval_frame.OptimizedModule )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ) -> Dict:
lowercase__ = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
lowercase__ = is_compiled_module(_SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase__ = model
lowercase__ = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = model.module
if not keep_fpaa_wrapper:
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , 'forward' )
lowercase__ = model.__dict__.pop('_original_forward' , _SCREAMING_SNAKE_CASE )
if original_forward is not None:
while hasattr(_SCREAMING_SNAKE_CASE , '__wrapped__' ):
lowercase__ = forward.__wrapped__
if forward == original_forward:
break
lowercase__ = forward
if getattr(_SCREAMING_SNAKE_CASE , '_converted_to_transformer_engine' , _SCREAMING_SNAKE_CASE ):
convert_model(_SCREAMING_SNAKE_CASE , to_transformer_engine=_SCREAMING_SNAKE_CASE )
if is_compiled:
lowercase__ = model
lowercase__ = compiled_model
return model
def __UpperCamelCase () -> Tuple:
PartialState().wait_for_everyone()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif PartialState().local_process_index == 0:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@contextmanager
def __UpperCamelCase (**_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
for key, value in kwargs.items():
lowercase__ = str(_SCREAMING_SNAKE_CASE )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[str]:
if not hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ) and not hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
lowercase__ = getattr(_SCREAMING_SNAKE_CASE , '__class__' , _SCREAMING_SNAKE_CASE )
if hasattr(_SCREAMING_SNAKE_CASE , '__qualname__' ):
return obj.__qualname__
if hasattr(_SCREAMING_SNAKE_CASE , '__name__' ):
return obj.__name__
return str(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for key, value in source.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = destination.setdefault(_SCREAMING_SNAKE_CASE , {} )
merge_dicts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
lowercase__ = value
return destination
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = None ) -> bool:
if port is None:
lowercase__ = 29500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0
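
# Stand-alone sketch (illustrative) of the temporary-environment-variable pattern implemented by
# the @contextmanager above: variables exist only inside the `with` block and are removed on exit.
import os
from contextlib import contextmanager


@contextmanager
def temporary_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    try:
        yield
    finally:
        for key in kwargs:
            os.environ.pop(key.upper(), None)


with temporary_environment(master_port=29501):
    assert os.environ["MASTER_PORT"] == "29501"
# Assuming MASTER_PORT was not set beforehand, it is gone again here.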
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the next chain member: the sum of the squares of the digits of `number`."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain starting at 1 ends at 1
CHAINS[57] = False  # the chain starting at 58 ends at 89


def chain(number: int) -> bool:
    """Return True if the chain starting at `number` arrives at 1, False if it arrives at 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain
    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below `number` produce a chain arriving at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)
    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"""{solution() = }""")
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class __UpperCAmelCase :
# setable values
UpperCamelCase = None
UpperCamelCase = None
UpperCamelCase = None # sigma(t_i)
@classmethod
def __magic_name__ ( cls : Any ):
return cls()
@dataclass
class __UpperCAmelCase ( lowerCamelCase__ ):
UpperCamelCase = 42
UpperCamelCase = 42
UpperCamelCase = 42
class __UpperCAmelCase ( lowerCamelCase__ , lowerCamelCase__ ):
@property
def __magic_name__ ( self : Optional[int] ):
return True
@register_to_config
def __init__( self : Optional[int], __A : float = 0.0_2, __A : float = 1_0_0, __A : float = 1.0_0_7, __A : float = 8_0, __A : float = 0.0_5, __A : float = 5_0, ):
pass
def __magic_name__ ( self : Optional[Any] ):
return KarrasVeSchedulerState.create()
def __magic_name__ ( self : int, __A : KarrasVeSchedulerState, __A : int, __A : Tuple = () ):
UpperCAmelCase : Optional[Any] = jnp.arange(0, __A )[::-1].copy()
UpperCAmelCase : Union[str, Any] = [
(
self.config.sigma_max**2
* (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
)
for i in timesteps
]
return state.replace(
            num_inference_steps=__A, schedule=jnp.array(__A, dtype=jnp.float32 ), timesteps=__A, )
def __magic_name__ ( self : List[Any], __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : random.KeyArray, ):
if self.config.s_min <= sigma <= self.config.s_max:
UpperCAmelCase : int = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1 )
else:
UpperCAmelCase : Optional[int] = 0
# sample eps ~ N(0, S_noise^2 * I)
UpperCAmelCase : Union[str, Any] = random.split(__A, num=1 )
UpperCAmelCase : List[str] = self.config.s_noise * random.normal(key=__A, shape=sample.shape )
UpperCAmelCase : Tuple = sigma + gamma * sigma
UpperCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
return sample_hat, sigma_hat
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : int = sample_hat + sigma_hat * model_output
UpperCAmelCase : Dict = (sample_hat - pred_original_sample) / sigma_hat
UpperCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Tuple, __A : KarrasVeSchedulerState, __A : jnp.ndarray, __A : float, __A : float, __A : jnp.ndarray, __A : jnp.ndarray, __A : jnp.ndarray, __A : bool = True, ):
UpperCAmelCase : Tuple = sample_prev + sigma_prev * model_output
UpperCAmelCase : List[str] = (sample_prev - pred_original_sample) / sigma_prev
UpperCAmelCase : Union[str, Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
if not return_dict:
return (sample_prev, derivative, state)
return FlaxKarrasVeOutput(prev_sample=__A, derivative=__A, state=__A )
def __magic_name__ ( self : Optional[Any], __A : KarrasVeSchedulerState, __A : Optional[int], __A : int, __A : Union[str, Any] ):
raise NotImplementedError()
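
# Illustrative numpy sketch (not part of the scheduler class) of the sigma schedule computed in
# set_timesteps above, assuming the default sigma_min=0.02 and sigma_max=100 from __init__.
import numpy as np

sigma_min, sigma_max, num_inference_steps = 0.02, 100.0, 50
timesteps = np.arange(0, num_inference_steps)[::-1]
schedule = np.array(
    [
        sigma_max**2 * (sigma_min**2 / sigma_max**2) ** (i / (num_inference_steps - 1))
        for i in timesteps
    ]
)
# Indexed by timestep t, schedule[t] decays from ~sigma_max**2 at the first step down to
# ~sigma_min**2 at t = 0, which is the order the sampling loop consumes it in.
print(schedule[num_inference_steps - 1], schedule[0])  # ~10000.0 ~0.0004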
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : List[str] = {
'configuration_convbert': ['CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvBertConfig', 'ConvBertOnnxConfig'],
'tokenization_convbert': ['ConvBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any] = ['ConvBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : int = [
'CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvBertForMaskedLM',
'ConvBertForMultipleChoice',
'ConvBertForQuestionAnswering',
'ConvBertForSequenceClassification',
'ConvBertForTokenClassification',
'ConvBertLayer',
'ConvBertModel',
'ConvBertPreTrainedModel',
'load_tf_weights_in_convbert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Dict = [
'TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFConvBertForMaskedLM',
'TFConvBertForMultipleChoice',
'TFConvBertForQuestionAnswering',
'TFConvBertForSequenceClassification',
'TFConvBertForTokenClassification',
'TFConvBertLayer',
'TFConvBertModel',
'TFConvBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
_a : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_a : Optional[int] = 'bert-base-cased'
_a : Optional[Any] = 'google/pegasus-xsum'
_a : Union[str, Any] = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_a : int = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_a : Union[str, Any] = 'patrickvonplaten/t5-tiny-random'
_a : Tuple = 'sshleifer/bart-tiny-random'
_a : str = 'sshleifer/tiny-mbart'
_a : Optional[int] = 'sshleifer/tiny-marian-en-de'
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Path ,_lowerCamelCase : list ) -> str:
_lowerCAmelCase : List[Any] = """\n""".join(_lowerCamelCase )
Path(_lowerCamelCase ).open("""w""" ).writelines(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(_lowerCamelCase ,f"{split}.source" ) ,_lowerCamelCase )
_dump_articles(os.path.join(_lowerCamelCase ,f"{split}.target" ) ,_lowerCamelCase )
return tmp_dir
class __A ( SCREAMING_SNAKE_CASE_ ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def __A ( self , a__ ):
_lowerCAmelCase : str = AutoTokenizer.from_pretrained(a__ )
_lowerCAmelCase : Any = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCAmelCase : Union[str, Any] = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
_lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
_lowerCAmelCase : str = 4
_lowerCAmelCase : Optional[int] = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
_lowerCAmelCase : Optional[int] = SeqaSeqDataset(
a__ , data_dir=a__ , type_path="""train""" , max_source_length=a__ , max_target_length=a__ , src_lang=a__ , tgt_lang=a__ , )
_lowerCAmelCase : int = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(a__ , a__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
_lowerCAmelCase : Any = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def __A ( self , a__ ):
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained(a__ )
_lowerCAmelCase : Union[str, Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
_lowerCAmelCase : Optional[int] = max(len(tokenizer.encode(a__ ) ) for a in ARTICLES )
_lowerCAmelCase : Any = max(len(tokenizer.encode(a__ ) ) for a in SUMMARIES )
_lowerCAmelCase : int = 4
_lowerCAmelCase : List[str] = LegacySeqaSeqDataset(
a__ , data_dir=a__ , type_path="""train""" , max_source_length=20 , max_target_length=a__ , )
_lowerCAmelCase : List[Any] = DataLoader(a__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def __A ( self ):
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
_lowerCAmelCase : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
_lowerCAmelCase : List[Any] = tmp_dir.joinpath("""train.source""" ).open().readlines()
_lowerCAmelCase : str = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(a__ , a__ , 128 , a__ )
_lowerCAmelCase : List[Any] = {x.name for x in tmp_dir.iterdir()}
_lowerCAmelCase : Tuple = {x.name for x in save_dir.iterdir()}
_lowerCAmelCase : Union[str, Any] = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(a__ ) < len(a__ )
assert len(a__ ) == 1
assert len(packed_examples[0] ) == sum(len(a__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def __A ( self ):
if not FAIRSEQ_AVAILABLE:
return
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = self._get_dataset(max_len=64 )
_lowerCAmelCase : Optional[int] = 64
_lowerCAmelCase : str = ds.make_dynamic_sampler(a__ , required_batch_size_multiple=a__ )
_lowerCAmelCase : int = [len(a__ ) for x in batch_sampler]
assert len(set(a__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(a__ ) == len(a__ ) # no dropped or added examples
_lowerCAmelCase : List[str] = DataLoader(a__ , batch_sampler=a__ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = []
for batch in data_loader:
_lowerCAmelCase : int = batch["""input_ids"""].shape
_lowerCAmelCase : Union[str, Any] = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
_lowerCAmelCase : List[str] = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(a__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(a__ )
assert num_src_per_batch[0] == max(a__ )
if failures:
raise AssertionError(F"too many tokens in {len(a__ )} batches" )
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = self._get_dataset(max_len=512 )
_lowerCAmelCase : int = 2
_lowerCAmelCase : List[str] = ds.make_sortish_sampler(a__ , shuffle=a__ )
_lowerCAmelCase : Dict = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 )
_lowerCAmelCase : int = DataLoader(a__ , batch_size=a__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=a__ )
_lowerCAmelCase : int = tokenizer.pad_token_id
def count_pad_tokens(a__ , a__="input_ids" ):
return [batch[k].eq(a__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(a__ , k="""labels""" ) ) < sum(count_pad_tokens(a__ , k="""labels""" ) )
assert sum(count_pad_tokens(a__ ) ) < sum(count_pad_tokens(a__ ) )
assert len(a__ ) == len(a__ )
def __A ( self , a__=1000 , a__=128 ):
if os.getenv("""USE_REAL_DATA""" , a__ ):
_lowerCAmelCase : List[str] = """examples/seq2seq/wmt_en_ro"""
_lowerCAmelCase : str = max_len * 2 * 64
if not Path(a__ ).joinpath("""train.len""" ).exists():
save_len_file(a__ , a__ )
else:
_lowerCAmelCase : List[str] = """examples/seq2seq/test_data/wmt_en_ro"""
_lowerCAmelCase : Dict = max_len * 4
save_len_file(a__ , a__ )
_lowerCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(a__ )
_lowerCAmelCase : Dict = SeqaSeqDataset(
a__ , data_dir=a__ , type_path="""train""" , max_source_length=a__ , max_target_length=a__ , n_obs=a__ , )
return ds, max_tokens, tokenizer
def __A ( self ):
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._get_dataset()
_lowerCAmelCase : Any = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=a__ ) )
_lowerCAmelCase : Optional[int] = set(DistributedSortishSampler(a__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=a__ ) )
assert idsa.intersection(a__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def __A ( self , a__ ):
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(a__ , use_fast=a__ )
if tok_name == MBART_TINY:
_lowerCAmelCase : Dict = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
_lowerCAmelCase : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
_lowerCAmelCase : List[Any] = SeqaSeqDataset(
a__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
_lowerCAmelCase : Tuple = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(a__ ) == 1 if tok_name == BART_TINY else len(a__ ) == 0
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class A_ ( lowerCAmelCase_ ):
def __init__( self : List[Any] , snake_case_ : Union[str, "sqlalchemy.sql.Selectable"] , snake_case_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , snake_case_ : Optional[Features] = None , snake_case_ : str = None , snake_case_ : bool = False , **snake_case_ : int , ):
super().__init__(features=snake_case_ , cache_dir=snake_case_ , keep_in_memory=snake_case_ , **snake_case_ )
_UpperCAmelCase = Sql(
cache_dir=snake_case_ , features=snake_case_ , sql=snake_case_ , con=snake_case_ , **snake_case_ , )
def lowercase ( self : Tuple ):
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=snake_case_ , download_mode=snake_case_ , verification_mode=snake_case_ , base_path=snake_case_ , )
# Build dataset for splits
_UpperCAmelCase = self.builder.as_dataset(
split="train" , verification_mode=snake_case_ , in_memory=self.keep_in_memory )
return dataset
class A_ :
def __init__( self : List[Any] , snake_case_ : Dataset , snake_case_ : str , snake_case_ : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] , snake_case_ : Optional[int] = None , snake_case_ : Optional[int] = None , **snake_case_ : Union[str, Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
_UpperCAmelCase = dataset
_UpperCAmelCase = name
_UpperCAmelCase = con
_UpperCAmelCase = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_UpperCAmelCase = num_proc
_UpperCAmelCase = to_sql_kwargs
def lowercase ( self : Dict ):
_UpperCAmelCase = self.to_sql_kwargs.pop("sql" , snake_case_ )
_UpperCAmelCase = self.to_sql_kwargs.pop("con" , snake_case_ )
_UpperCAmelCase = self.to_sql_kwargs.pop("index" , snake_case_ )
_UpperCAmelCase = self._write(index=snake_case_ , **self.to_sql_kwargs )
return written
def lowercase ( self : Tuple , snake_case_ : Any ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = args
_UpperCAmelCase = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
_UpperCAmelCase = query_table(
table=self.dataset.data , key=slice(snake_case_ , offset + self.batch_size ) , indices=self.dataset._indices , )
_UpperCAmelCase = batch.to_pandas()
_UpperCAmelCase = df.to_sql(self.name , self.con , index=snake_case_ , **snake_case_ )
return num_rows or len(snake_case_ )
def lowercase ( self : Optional[int] , snake_case_ : Optional[Any] , **snake_case_ : str ):
_UpperCAmelCase = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
_UpperCAmelCase , _UpperCAmelCase = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql , [(offset, index, to_sql_kwargs) for offset in range(0 , snake_case_ , snake_case_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating SQL from Arrow format" , ):
written += num_rows
return written
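
# Illustrative sketch (independent of the classes above) of the batched DataFrame.to_sql pattern
# the writer uses: the first batch creates the table, later batches append to it.
import sqlite3

import pandas as pd

df = pd.DataFrame({"id": range(10), "text": [f"row {i}" for i in range(10)]})
con = sqlite3.connect(":memory:")
batch_size = 4
for offset in range(0, len(df), batch_size):
    batch = df.iloc[offset : offset + batch_size]
    to_sql_kwargs = {"if_exists": "append"} if offset > 0 else {}
    batch.to_sql("demo_table", con, index=False, **to_sql_kwargs)
print(pd.read_sql("SELECT COUNT(*) AS n FROM demo_table", con))  # n == 10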
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def UpperCAmelCase_ ( __lowercase : str ) -> List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = image.size
_UpperCAmelCase , _UpperCAmelCase = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
    _UpperCAmelCase = np.array(__lowercase ).astype(np.float32 ) / 255.0
_UpperCAmelCase = image[None].transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = torch.from_numpy(__lowercase )
return 2.0 * image - 1.0
class A_ ( lowerCAmelCase_ ):
def __init__( self : Optional[Any] , snake_case_ : VQModel , snake_case_ : UNetaDModel , snake_case_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] , ):
super().__init__()
self.register_modules(vqvae=snake_case_ , unet=snake_case_ , scheduler=snake_case_ )
@torch.no_grad()
def __call__( self : Any , snake_case_ : Union[torch.Tensor, PIL.Image.Image] = None , snake_case_ : Optional[int] = 1 , snake_case_ : Optional[int] = 1_0_0 , snake_case_ : Optional[float] = 0.0 , snake_case_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case_ : Optional[str] = "pil" , snake_case_ : bool = True , ):
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = 1
elif isinstance(snake_case_ , torch.Tensor ):
_UpperCAmelCase = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(snake_case_ )}' )
if isinstance(snake_case_ , PIL.Image.Image ):
_UpperCAmelCase = preprocess(snake_case_ )
_UpperCAmelCase , _UpperCAmelCase = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
_UpperCAmelCase = (batch_size, self.unet.config.in_channels // 2, height, width)
_UpperCAmelCase = next(self.unet.parameters() ).dtype
_UpperCAmelCase = randn_tensor(snake_case_ , generator=snake_case_ , device=self.device , dtype=snake_case_ )
_UpperCAmelCase = image.to(device=self.device , dtype=snake_case_ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(snake_case_ , device=self.device )
_UpperCAmelCase = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
_UpperCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCAmelCase = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCAmelCase = {}
if accepts_eta:
_UpperCAmelCase = eta
for t in self.progress_bar(snake_case_ ):
# concat latents and low resolution image in the channel dimension.
_UpperCAmelCase = torch.cat([latents, image] , dim=1 )
_UpperCAmelCase = self.scheduler.scale_model_input(snake_case_ , snake_case_ )
# predict the noise residual
_UpperCAmelCase = self.unet(snake_case_ , snake_case_ ).sample
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
# decode the image latents with the VQVAE
_UpperCAmelCase = self.vqvae.decode(snake_case_ ).sample
_UpperCAmelCase = torch.clamp(snake_case_ , -1.0 , 1.0 )
_UpperCAmelCase = image / 2 + 0.5
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
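
# Stand-alone sketch (illustrative) of the `preprocess` helper above: resize to a multiple of 32,
# scale to [0, 1], move channels first, and map to the [-1, 1] range the UNet expects.
import numpy as np
import PIL.Image
import torch

demo_image = PIL.Image.new("RGB", (100, 70), color=(128, 64, 32))
w, h = (x - x % 32 for x in demo_image.size)  # 100x70 -> 96x64
resized = demo_image.resize((w, h), resample=PIL.Image.LANCZOS)
array = np.array(resized).astype(np.float32) / 255.0          # HWC in [0, 1]
tensor = torch.from_numpy(array[None].transpose(0, 3, 1, 2))  # NCHW
tensor = 2.0 * tensor - 1.0
print(tensor.shape, float(tensor.min()), float(tensor.max()))  # torch.Size([1, 3, 64, 96]), values in [-1, 1]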
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'''facebook/levit-128S''': '''https://huggingface.co/facebook/levit-128S/resolve/main/config.json''',
# See all LeViT models at https://huggingface.co/models?filter=levit
}
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : int = "levit"
def __init__( self , lowerCamelCase_=2_24 , lowerCamelCase_=3 , lowerCamelCase_=3 , lowerCamelCase_=2 , lowerCamelCase_=1 , lowerCamelCase_=16 , lowerCamelCase_=[1_28, 2_56, 3_84] , lowerCamelCase_=[4, 8, 12] , lowerCamelCase_=[4, 4, 4] , lowerCamelCase_=[16, 16, 16] , lowerCamelCase_=0 , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=[2, 2, 2] , lowerCamelCase_=0.02 , **lowerCamelCase_ , ) -> Union[str, Any]:
super().__init__(**lowerCamelCase_ )
lowerCAmelCase__ = image_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = kernel_size
lowerCAmelCase__ = stride
lowerCAmelCase__ = padding
lowerCAmelCase__ = hidden_sizes
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = depths
lowerCAmelCase__ = key_dim
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = attention_ratio
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = [
['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : Optional[Any] = version.parse("1.11" )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __SCREAMING_SNAKE_CASE ( self ) -> float:
return 1e-4
'''simple docstring'''
from __future__ import annotations
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ ) -> None:
lowerCAmelCase__ = order
# a_{0} ... a_{k}
lowerCAmelCase__ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
lowerCAmelCase__ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
lowerCAmelCase__ = [0.0] * self.order
# y[n-1] ... y[n-k]
lowerCAmelCase__ = [0.0] * self.order
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> None:
if len(lowerCamelCase_ ) < self.order:
lowerCAmelCase__ = [1.0, *a_coeffs]
if len(lowerCamelCase_ ) != self.order + 1:
lowerCAmelCase__ = (
F"""Expected a_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(lowerCamelCase_ )}"""
)
raise ValueError(lowerCamelCase_ )
if len(lowerCamelCase_ ) != self.order + 1:
lowerCAmelCase__ = (
F"""Expected b_coeffs to have {self.order + 1} elements """
F"""for {self.order}-order filter, got {len(lowerCamelCase_ )}"""
)
raise ValueError(lowerCamelCase_ )
lowerCAmelCase__ = a_coeffs
lowerCAmelCase__ = b_coeffs
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> float:
lowerCAmelCase__ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
lowerCAmelCase__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
lowerCAmelCase__ = self.input_history[:-1]
lowerCAmelCase__ = self.output_history[:-1]
lowerCAmelCase__ = sample
lowerCAmelCase__ = result
return result
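
# Self-contained sketch (illustrative) of the direct-form I update the class above performs,
# written out for a first-order filter: y[n] = (b0*x[n] + b1*x[n-1] - a1*y[n-1]) / a0.
def iir_first_order(samples: list, a_coeffs: list, b_coeffs: list) -> list:
    a0, a1 = a_coeffs
    b0, b1 = b_coeffs
    x_prev = y_prev = 0.0
    output = []
    for x in samples:
        y = (b0 * x + b1 * x_prev - a1 * y_prev) / a0
        x_prev, y_prev = x, y
        output.append(y)
    return output


# A simple smoother, y[n] = 0.5*x[n] + 0.5*y[n-1], i.e. a=[1.0, -0.5] and b=[0.5, 0.0]:
print(iir_first_order([1.0, 1.0, 1.0, 1.0], [1.0, -0.5], [0.5, 0.0]))
# -> [0.5, 0.75, 0.875, 0.9375], converging toward the input level of 1.0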
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
__lowerCamelCase : Optional[Any] = get_tests_dir('''fixtures/dummy_feature_extractor_config.json''')
__lowerCamelCase : int = get_tests_dir('''fixtures/vocab.json''')
__lowerCamelCase : Any = get_tests_dir('''fixtures''')
class a__ ( unittest.TestCase ):
A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = 0
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : str = WavaVecaConfig()
SCREAMING_SNAKE_CASE_ : str = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(_A )
processor.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_A,os.path.join(_A,_A ) )
copyfile(_A,os.path.join(_A,"vocab.json" ) )
SCREAMING_SNAKE_CASE_ : str = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : List[Any] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = WavaVecaProcessor(_A,_A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in tokenizer
with open(os.path.join(_A,_A ),"r" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[int] = json.load(_A )
config_dict.pop("processor_class" )
with open(os.path.join(_A,_A ),"w" ) as f:
f.write(json.dumps(_A ) )
SCREAMING_SNAKE_CASE_ : List[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaFeatureExtractor()
SCREAMING_SNAKE_CASE_ : Optional[int] = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
SCREAMING_SNAKE_CASE_ : List[Any] = WavaVecaProcessor(_A,_A )
# save in new folder
processor.save_pretrained(_A )
# drop `processor_class` in feature extractor
with open(os.path.join(_A,_A ),"r" ) as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(_A )
config_dict.pop("processor_class" )
with open(os.path.join(_A,_A ),"w" ) as f:
f.write(json.dumps(_A ) )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ : Tuple = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(_A )
# copy relevant files
copyfile(_A,os.path.join(_A,"vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(_A,_A ),"w" ) as f:
f.write("{}" )
SCREAMING_SNAKE_CASE_ : List[str] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
def __UpperCamelCase ( self : List[Any] ):
"""simple docstring"""
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : int = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A ):
SCREAMING_SNAKE_CASE_ : Dict = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor",trust_remote_code=_A )
SCREAMING_SNAKE_CASE_ : Dict = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor",trust_remote_code=_A )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__,"NewProcessor" )
SCREAMING_SNAKE_CASE_ : Optional[int] = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__,"NewFeatureExtractor" )
SCREAMING_SNAKE_CASE_ : int = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__,"NewTokenizerFast" )
# Test we can also load the slow version
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor",trust_remote_code=_A,use_fast=_A )
SCREAMING_SNAKE_CASE_ : List[str] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__,"NewTokenizer" )
else:
self.assertEqual(tokenizer.__class__.__name__,"NewTokenizer" )
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
try:
AutoConfig.register("custom",_A )
AutoFeatureExtractor.register(_A,_A )
AutoTokenizer.register(_A,slow_tokenizer_class=_A )
AutoProcessor.register(_A,_A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A ):
AutoProcessor.register(_A,_A )
# Now that the config is registered, it can be used as any other config with the auto-API
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(_A,"vocab.txt" )
with open(_A,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str = CustomTokenizer(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = CustomProcessor(_A,_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_A )
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoProcessor.from_pretrained(_A )
self.assertIsInstance(_A,_A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self : List[str] ):
"""simple docstring"""
class a__ ( A__ ):
A = False
class a__ ( A__ ):
A = False
class a__ ( A__ ):
A = 'AutoFeatureExtractor'
A = 'AutoTokenizer'
A = False
try:
AutoConfig.register("custom",_A )
AutoFeatureExtractor.register(_A,_A )
AutoTokenizer.register(_A,slow_tokenizer_class=_A )
AutoProcessor.register(_A,_A )
# If remote code is not set, the default is to use local classes.
SCREAMING_SNAKE_CASE_ : int = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor" )
self.assertEqual(processor.__class__.__name__,"NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
SCREAMING_SNAKE_CASE_ : Union[str, Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor",trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__,"NewProcessor" )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote is enabled, we load from the Hub.
SCREAMING_SNAKE_CASE_ : Optional[Any] = AutoProcessor.from_pretrained(
"hf-internal-testing/test_dynamic_processor",trust_remote_code=_A )
self.assertEqual(processor.__class__.__name__,"NewProcessor" )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCamelCase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert" )
self.assertEqual(processor.__class__.__name__,"BertTokenizerFast" )
def __UpperCamelCase ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext" )
self.assertEqual(processor.__class__.__name__,"ConvNextImageProcessor" )
@is_staging_test
class a__ ( unittest.TestCase ):
A = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __UpperCamelCase ( cls : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = TOKEN
HfFolder.save_token(_A )
@classmethod
def __UpperCamelCase ( cls : Union[str, Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token,repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token,repo_id="test-dynamic-processor" )
except HTTPError:
pass
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A,"test-processor" ),push_to_hub=_A,use_auth_token=self._token )
SCREAMING_SNAKE_CASE_ : List[str] = WavaVecaProcessor.from_pretrained(F'{USER}/test-processor' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A,getattr(new_processor.feature_extractor,_A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(),processor.tokenizer.get_vocab() )
def __UpperCamelCase ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = WavaVecaProcessor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_A,"test-processor-org" ),push_to_hub=_A,use_auth_token=self._token,organization="valid_org",)
SCREAMING_SNAKE_CASE_ : List[Any] = WavaVecaProcessor.from_pretrained("valid_org/test-processor-org" )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_A,getattr(new_processor.feature_extractor,_A ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab(),processor.tokenizer.get_vocab() )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
SCREAMING_SNAKE_CASE_ : Optional[int] = CustomFeatureExtractor.from_pretrained(_A )
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE_ : List[Any] = os.path.join(_A,"vocab.txt" )
with open(_A,"w",encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE_ : str = CustomTokenizer(_A )
SCREAMING_SNAKE_CASE_ : Any = CustomProcessor(_A,_A )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(F'{USER}/test-dynamic-processor',token=self._token )
SCREAMING_SNAKE_CASE_ : Optional[Any] = Repository(_A,clone_from=F'{USER}/test-dynamic-processor',token=self._token )
processor.save_pretrained(_A )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map,{
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
},)
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_A,"tokenizer_config.json" ) ) as f:
SCREAMING_SNAKE_CASE_ : Dict = json.load(_A )
self.assertDictEqual(
tokenizer_config["auto_map"],{
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
},)
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_A,"custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A,"custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(_A,"custom_processing.py" ) ) )
repo.push_to_hub()
SCREAMING_SNAKE_CASE_ : int = AutoProcessor.from_pretrained(F'{USER}/test-dynamic-processor',trust_remote_code=_A )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__,"CustomProcessor" )
def binary_insertion_sort(collection: list) -> list:
    """Sort a list in place, using binary search to locate each insertion point."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient over 2..limit via a sieve, i.e. count reduced proper fractions."""
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))
    phi = [float(n) for n in range(limit + 1)]
    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:]))
if __name__ == "__main__":
print(f'''{solution() = }''')
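
# Illustrative cross-check (not part of the original file): for a small limit, the totient-sum
# result must equal a brute-force count of reduced proper fractions n/d with gcd(n, d) == 1.
from math import gcd


def brute_force_fraction_count(limit: int) -> int:
    return sum(1 for d in range(2, limit + 1) for n in range(1, d) if gcd(n, d) == 1)


assert brute_force_fraction_count(8) == 21  # matches the Project Euler example for d <= 8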
def greatest_common_divisor(x: int, y: int) -> int:
    """Euclidean algorithm for the greatest common divisor."""
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x: int, y: int) -> int:
    """Least common multiple via the gcd."""
    return (x * y) // greatest_common_divisor(x, y)


def solution(n: int = 20) -> int:
    """Smallest positive number evenly divisible by all of 1..n."""
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
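
# Illustrative cross-check with the standard library (Python 3.9+); math.lcm is aliased so it
# does not shadow the lcm() defined above. The smallest number divisible by 1..10 is 2520.
from functools import reduce
from math import lcm as _math_lcm

assert reduce(_math_lcm, range(1, 11)) == 2520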
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want a 50 times smaller than this see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
snake_case : Dict = '''facebook/wmt19-en-de'''
snake_case : List[Any] = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
snake_case : Optional[Any] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
snake_case : Optional[Any] = FSMTForConditionalGeneration(config)
print(F"""num of params {tiny_model.num_parameters()}""")
# Test
snake_case : Union[str, Any] = tokenizer(['''Making tiny model'''], return_tensors='''pt''')
snake_case : int = tiny_model(**batch)
print('''test output:''', len(outputs.logits[0]))
# Save
snake_case : Optional[int] = '''tiny-wmt19-en-de'''
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-de
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Return a DataFrame of titles, links, prices, ratings, MRPs, and discounts for Amazon search results."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        # Blank out MRP and Discount for rows where the listed price exceeds the MRP
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame
if __name__ == "__main__":
__snake_case : Any = 'headphones'
get_amazon_product_data(product).to_csv(F"""Amazon Product Data for {product}.csv""")
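
# Worked example of the discount computation above (illustrative numbers, not scraped data):
# with product_mrp = "₹50,000" and product_price = "₹40,000",
# discount = (50000 - 40000) / 50000 * 100 = 20.0, i.e. a 20% reduction from the MRP.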
| 269
| 0
|
'''simple docstring'''
from ..utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_scipy_available,
    is_torch_available,
    is_torchsde_available,
)
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_pt_objects import *  # noqa F403
else:
    from .scheduling_consistency_models import CMStochasticIterativeScheduler
    from .scheduling_ddim import DDIMScheduler
    from .scheduling_ddim_inverse import DDIMInverseScheduler
    from .scheduling_ddim_parallel import DDIMParallelScheduler
    from .scheduling_ddpm import DDPMScheduler
    from .scheduling_ddpm_parallel import DDPMParallelScheduler
    from .scheduling_deis_multistep import DEISMultistepScheduler
    from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
    from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
    from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
    from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
    from .scheduling_euler_discrete import EulerDiscreteScheduler
    from .scheduling_heun_discrete import HeunDiscreteScheduler
    from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
    from .scheduling_karras_ve import KarrasVeScheduler
    from .scheduling_pndm import PNDMScheduler
    from .scheduling_repaint import RePaintScheduler
    from .scheduling_sde_ve import ScoreSdeVeScheduler
    from .scheduling_sde_vp import ScoreSdeVpScheduler
    from .scheduling_unclip import UnCLIPScheduler
    from .scheduling_unipc_multistep import UniPCMultistepScheduler
    from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
    from .scheduling_vq_diffusion import VQDiffusionScheduler

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_flax_objects import *  # noqa F403
else:
    from .scheduling_ddim_flax import FlaxDDIMScheduler
    from .scheduling_ddpm_flax import FlaxDDPMScheduler
    from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
    from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
    from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
    from .scheduling_pndm_flax import FlaxPNDMScheduler
    from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
    from .scheduling_utils_flax import (
        FlaxKarrasDiffusionSchedulers,
        FlaxSchedulerMixin,
        FlaxSchedulerOutput,
        broadcast_to_shape_from_left,
    )

try:
    if not (is_torch_available() and is_scipy_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_scipy_objects import *  # noqa F403
else:
    from .scheduling_lms_discrete import LMSDiscreteScheduler

try:
    if not (is_torch_available() and is_torchsde_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_torchsde_objects import *  # noqa F403
else:
    from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
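
# Illustrative usage of one of the schedulers re-exported above (a sketch of how downstream code
# typically consumes these exports; not executed as part of this __init__):
#
#     from diffusers.schedulers import DDPMScheduler
#
#     scheduler = DDPMScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(num_inference_steps=50)
#     print(scheduler.timesteps[:5])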
| 350
|
"""A simple launch helper that spawns a training script across multiple TPU cores."""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the command line options for the launch helper."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
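
# Illustrative invocation, assuming this file is saved as `xla_spawn.py` (the training script name
# and its flags below are placeholders, not taken from this file):
#
#     python xla_spawn.py --num_cores 8 my_training_script.py --arg1 value1
#
# The launcher imports `my_training_script` as a module, patches sys.argv to append
# `--tpu_num_cores 8`, and spawns its `_mp_fn` once per TPU core via xmp.spawn.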
| 349
| 0
|