code
stringlengths 86
54.5k
| code_codestyle
int64 0
371
| style_context
stringlengths 87
49.2k
| style_context_codestyle
int64 0
349
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCAmelCase ( unittest.TestCase):
@require_torch
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
UpperCamelCase__ = load_dataset("ashraq/esc50" )
UpperCamelCase__ = dataset["train"]["audio"][-1]["array"]
UpperCamelCase__ = audio_classifier(_a, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
pass
@slow
@require_torch
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
# This is an audio of a dog
UpperCamelCase__ = load_dataset("ashraq/esc50" )
UpperCamelCase__ = dataset["train"]["audio"][-1]["array"]
UpperCamelCase__ = audio_classifier(_a, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
UpperCamelCase__ = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
UpperCamelCase__ = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_a ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def lowercase_ ( self : Any ):
"""simple docstring"""
pass
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from typing import Any
class UpperCAmelCase :
def __init__( self : Dict, a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = data
UpperCamelCase__ = None
class UpperCAmelCase :
def __init__( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.head
while temp is not None:
print(temp.data, end=" " )
UpperCamelCase__ = temp.next
print()
def lowercase_ ( self : Optional[Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = Node(snake_case_ )
UpperCamelCase__ = self.head
UpperCamelCase__ = new_node
def lowercase_ ( self : Optional[int], a_ : str, a_ : Dict ):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
UpperCamelCase__ = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCamelCase__ = node_a.next
UpperCamelCase__ = self.head
while node_a is not None and node_a.data != node_data_a:
UpperCamelCase__ = node_a.next
if node_a is None or node_a is None:
return
UpperCamelCase__ = node_a.data, node_a.data
if __name__ == "__main__":
__lowercase: Dict = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("After swapping")
ll.print_list()
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class UpperCAmelCase :
def __init__( self : Optional[Any], a_ : Dict, a_ : Tuple=13, a_ : Tuple=7, a_ : Dict=True, a_ : Optional[int]=True, a_ : Dict=True, a_ : Tuple=True, a_ : Union[str, Any]=99, a_ : str=[1, 1, 2], a_ : Dict=1, a_ : Dict=32, a_ : int=4, a_ : List[str]=8, a_ : Tuple=37, a_ : List[str]="gelu_new", a_ : Optional[int]=0.1, a_ : Any=0.1, a_ : Any=0.0, a_ : Tuple=512, a_ : Union[str, Any]=3, a_ : Tuple=0.02, a_ : str=3, a_ : Optional[Any]=4, a_ : Optional[int]=None, a_ : Any=False, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = block_sizes
UpperCamelCase__ = num_decoder_layers
UpperCamelCase__ = d_model
UpperCamelCase__ = n_head
UpperCamelCase__ = d_head
UpperCamelCase__ = d_inner
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = 2
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = scope
UpperCamelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
UpperCamelCase__ = n_head
# Used in the tests to check the size of the first hidden state
UpperCamelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
UpperCamelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
UpperCamelCase__ = self.num_hidden_layers + 2
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase__ = ids_tensor([self.batch_size], self.num_choices )
UpperCamelCase__ = FunnelConfig(
vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std, )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self : Dict, a_ : List[str], a_ : Any, a_ : Tuple, a_ : Dict, a_ : Optional[int], a_ : List[str], a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelModel(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelModel(config=__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelModel(config=__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model) )
def lowercase_ ( self : List[str], a_ : Optional[Any], a_ : Union[str, Any], a_ : Dict, a_ : List[str], a_ : List[str], a_ : Tuple, a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelBaseModel(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelBaseModel(config=__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model) )
UpperCamelCase__ = False
UpperCamelCase__ = TFFunnelBaseModel(config=__lowercase )
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model) )
def lowercase_ ( self : Optional[int], a_ : Any, a_ : Optional[int], a_ : str, a_ : List[Any], a_ : Union[str, Any], a_ : Union[str, Any], a_ : Any, ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelForPreTraining(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length) )
def lowercase_ ( self : Union[str, Any], a_ : int, a_ : List[str], a_ : Tuple, a_ : Dict, a_ : Any, a_ : Union[str, Any], a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelForMaskedLM(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : str, a_ : Any, a_ : List[str], a_ : Optional[int], a_ : str, a_ : int, a_ : Optional[Any], a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFFunnelForSequenceClassification(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : str, a_ : Optional[int], a_ : int, a_ : Optional[Any], a_ : Union[str, Any], a_ : str, a_ : str, a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = self.num_choices
UpperCamelCase__ = TFFunnelForMultipleChoice(config=__lowercase )
UpperCamelCase__ = tf.tile(tf.expand_dims(__lowercase, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(__lowercase, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ = tf.tile(tf.expand_dims(__lowercase, 1 ), (1, self.num_choices, 1) )
UpperCamelCase__ = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices) )
def lowercase_ ( self : Union[str, Any], a_ : List[Any], a_ : Tuple, a_ : List[Any], a_ : Optional[int], a_ : str, a_ : List[Any], a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFFunnelForTokenClassification(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : List[str], a_ : List[str], a_ : Optional[Any], a_ : Optional[Any], a_ : List[str], a_ : Optional[int], a_ : int, a_ : Union[str, Any], ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelForQuestionAnswering(config=__lowercase )
UpperCamelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCamelCase__ = model(__lowercase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
(
UpperCamelCase__
) = config_and_inputs
UpperCamelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
_lowerCamelCase : Tuple = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase : Optional[Any] = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : List[str] = False
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=__lowercase )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__lowercase )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowercase )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
@require_tf
class UpperCAmelCase ( __lowerCamelCase , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : List[Any] = False
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = TFFunnelModelTester(self, base=__lowercase )
UpperCamelCase__ = ConfigTester(self, config_class=__lowercase )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__lowercase )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowercase )
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( __UpperCamelCase , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = KandinskyVaaPriorPipeline
_lowerCamelCase : Tuple = ['prompt']
_lowerCamelCase : Tuple = ['prompt', 'negative_prompt']
_lowerCamelCase : Any = [
'num_images_per_prompt',
'generator',
'num_inference_steps',
'latents',
'negative_prompt',
'guidance_scale',
'output_type',
'return_dict',
]
_lowerCamelCase : str = False
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return 32
@property
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return self.time_input_dim
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return 100
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowercase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-0_5, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
return CLIPTextModelWithProjection(a_ )
@property
def lowercase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = {
'num_attention_heads': 2,
'attention_head_dim': 12,
'embedding_dim': self.text_embedder_hidden_size,
'num_layers': 1,
}
UpperCamelCase__ = PriorTransformer(**a_ )
# clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
UpperCamelCase__ = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
UpperCamelCase__ = CLIPVisionModelWithProjection(a_ )
return model
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPImageProcessor(
crop_size=224, do_center_crop=a_, do_normalize=a_, do_resize=a_, image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073], image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711], resample=3, size=224, )
return image_processor
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.dummy_prior
UpperCamelCase__ = self.dummy_image_encoder
UpperCamelCase__ = self.dummy_text_encoder
UpperCamelCase__ = self.dummy_tokenizer
UpperCamelCase__ = self.dummy_image_processor
UpperCamelCase__ = UnCLIPScheduler(
variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=a_, clip_sample_range=10.0, )
UpperCamelCase__ = {
'prior': prior,
'image_encoder': image_encoder,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'scheduler': scheduler,
'image_processor': image_processor,
}
return components
def lowercase_ ( self : int, a_ : Optional[int], a_ : List[str]=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
UpperCamelCase__ = torch.manual_seed(a_ )
else:
UpperCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
UpperCamelCase__ = {
'prompt': 'horse',
'generator': generator,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = 'cpu'
UpperCamelCase__ = self.get_dummy_components()
UpperCamelCase__ = self.pipeline_class(**a_ )
UpperCamelCase__ = pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
UpperCamelCase__ = pipe(**self.get_dummy_inputs(a_ ) )
UpperCamelCase__ = output.image_embeds
UpperCamelCase__ = pipe(
**self.get_dummy_inputs(a_ ), return_dict=a_, )[0]
UpperCamelCase__ = image[0, -10:]
UpperCamelCase__ = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
UpperCamelCase__ = np.array(
[-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@skip_mps
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = torch_device == 'cpu'
UpperCamelCase__ = True
UpperCamelCase__ = False
self._test_inference_batch_single_identical(
test_max_difference=a_, relax_max_difference=a_, test_mean_pixel_difference=a_, )
@skip_mps
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = torch_device == 'cpu'
UpperCamelCase__ = False
self._test_attention_slicing_forward_pass(
test_max_difference=a_, test_mean_pixel_difference=a_, )
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 0
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
__lowercase: Tuple = logging.get_logger(__name__)
__lowercase: Optional[Any] = {"""vocab_file""": """spiece.model"""}
__lowercase: int = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
}
}
# TODO(PVP) - this should be removed in Transformers v5
__lowercase: Optional[int] = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
__lowercase: Dict = """▁"""
class UpperCAmelCase ( lowerCAmelCase_):
_lowerCamelCase : int = VOCAB_FILES_NAMES
_lowerCamelCase : int = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : int, a_ : Optional[Any], a_ : List[str]="</s>", a_ : Tuple="<unk>", a_ : Tuple="<pad>", a_ : Tuple=100, a_ : Any=None, a_ : Optional[Any] = None, a_ : str=True, **a_ : Any, ):
"""simple docstring"""
if extra_ids > 0 and additional_special_tokens is None:
UpperCamelCase__ = [f'<extra_id_{i}>' for i in range(__SCREAMING_SNAKE_CASE )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
UpperCamelCase__ = len(set(filter(lambda a_ : bool("extra_id" in str(__SCREAMING_SNAKE_CASE ) ), __SCREAMING_SNAKE_CASE ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
" provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
" tokens" )
if legacy:
logger.warning_once(
f'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
" read the related pull request available at https://github.com/huggingface/transformers/pull/24565" )
UpperCamelCase__ = legacy
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE, unk_token=__SCREAMING_SNAKE_CASE, pad_token=__SCREAMING_SNAKE_CASE, extra_ids=__SCREAMING_SNAKE_CASE, additional_special_tokens=__SCREAMING_SNAKE_CASE, sp_model_kwargs=self.sp_model_kwargs, legacy=__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE, )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = extra_ids
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@staticmethod
def lowercase_ ( a_ : Union[str, Any], a_ : List[Any], a_ : List[str] ):
"""simple docstring"""
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
UpperCamelCase__ = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
"This tokenizer was incorrectly instantiated with a model max length of"
f' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
" behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
" `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
f' {pretrained_model_name_or_path} automatically truncating your input to'
f' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
f' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
" `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
" instantiate this tokenizer with `model_max_length` set to your preferred value.", __SCREAMING_SNAKE_CASE, )
return max_model_length
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return self.sp_model.get_piece_size() + self._extra_ids
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowercase_ ( self : str, a_ : str, a_ : Tuple = None, a_ : int = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__SCREAMING_SNAKE_CASE, token_ids_a=__SCREAMING_SNAKE_CASE, already_has_special_tokens=__SCREAMING_SNAKE_CASE )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
return ([0] * len(__SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(__SCREAMING_SNAKE_CASE )) + [1]
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return list(
set(filter(lambda a_ : bool(re.search(R"<extra_id_\d+>", __SCREAMING_SNAKE_CASE ) ) is not None, self.additional_special_tokens ) ) )
def lowercase_ ( self : int ):
"""simple docstring"""
return [self._convert_token_to_id(__SCREAMING_SNAKE_CASE ) for token in self.get_sentinel_tokens()]
def lowercase_ ( self : Union[str, Any], a_ : Union[str, Any] ):
"""simple docstring"""
if len(__SCREAMING_SNAKE_CASE ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
f'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
" eos tokens being added." )
return token_ids
else:
return token_ids + [self.eos_token_id]
def lowercase_ ( self : List[str], a_ : Dict, a_ : Optional[int] = None ):
"""simple docstring"""
UpperCamelCase__ = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def lowercase_ ( self : Tuple, a_ : List[str], a_ : int = None ):
"""simple docstring"""
UpperCamelCase__ = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return token_ids_a
else:
UpperCamelCase__ = self._add_eos_if_not_present(__SCREAMING_SNAKE_CASE )
return token_ids_a + token_ids_a
def __getstate__( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self : Dict, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : Optional[int], a_ : int, **a_ : Optional[int] ):
"""simple docstring"""
if not self.legacy:
UpperCamelCase__ = SPIECE_UNDERLINE + text.replace(__SCREAMING_SNAKE_CASE, " " )
return super().tokenize(__SCREAMING_SNAKE_CASE, **__SCREAMING_SNAKE_CASE )
def lowercase_ ( self : Any, a_ : Dict, **a_ : int ):
"""simple docstring"""
if not self.legacy:
UpperCamelCase__ = text.startswith(__SCREAMING_SNAKE_CASE )
if is_first:
UpperCamelCase__ = text[1:]
UpperCamelCase__ = self.sp_model.encode(__SCREAMING_SNAKE_CASE, out_type=__SCREAMING_SNAKE_CASE )
if not self.legacy and not is_first and not text.startswith(" " ) and tokens[0].startswith(__SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def lowercase_ ( self : Optional[int], a_ : Optional[int] ):
"""simple docstring"""
if token.startswith("<extra_id_" ):
UpperCamelCase__ = re.match(R"<extra_id_(\d+)>", __SCREAMING_SNAKE_CASE )
UpperCamelCase__ = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def lowercase_ ( self : Dict, a_ : Any ):
"""simple docstring"""
if index < self.sp_model.get_piece_size():
UpperCamelCase__ = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = f'<extra_id_{self.vocab_size - 1 - index}>'
return token
def lowercase_ ( self : Tuple, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = ""
UpperCamelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
UpperCamelCase__ = True
UpperCamelCase__ = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = False
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def lowercase_ ( self : Optional[int], a_ : Optional[int], a_ : Dict = None ):
"""simple docstring"""
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
__SCREAMING_SNAKE_CASE, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE, "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from collections import deque
class UpperCAmelCase :
def __init__( self : int, a_ : Dict, a_ : List[str], a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = process_name # process name
UpperCamelCase__ = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
UpperCamelCase__ = arrival_time
UpperCamelCase__ = burst_time # remaining burst time
UpperCamelCase__ = 0 # total time of the process wait in ready queue
UpperCamelCase__ = 0 # time from arrival time to completion time
class UpperCAmelCase :
def __init__( self : str, a_ : List[Any], a_ : Optional[Any], a_ : Union[str, Any], a_ : Union[str, Any], ):
"""simple docstring"""
UpperCamelCase__ = number_of_queues
# time slice of queues that round robin algorithm applied
UpperCamelCase__ = time_slices
# unfinished process is in this ready_queue
UpperCamelCase__ = queue
# current time
UpperCamelCase__ = current_time
# finished process is in this sequence queue
UpperCamelCase__ = deque()
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def lowercase_ ( self : Union[str, Any], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(a_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def lowercase_ ( self : str, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(a_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(a_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def lowercase_ ( self : List[Any], a_ : int ):
"""simple docstring"""
return [q.burst_time for q in queue]
def lowercase_ ( self : Union[str, Any], a_ : int ):
"""simple docstring"""
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def lowercase_ ( self : str, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = deque() # sequence deque of finished process
while len(a_ ) != 0:
UpperCamelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(a_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
UpperCamelCase__ = 0
# set the process's turnaround time because it is finished
UpperCamelCase__ = self.current_time - cp.arrival_time
# set the completion time
UpperCamelCase__ = self.current_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def lowercase_ ( self : str, a_ : List[str], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(a_ ) ):
UpperCamelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(a_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
UpperCamelCase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(a_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
UpperCamelCase__ = 0
# set the finish time
UpperCamelCase__ = self.current_time
# update the process' turnaround time because it is finished
UpperCamelCase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(a_ )
self.finish_queue.extend(a_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
for i in range(self.number_of_queues - 1 ):
UpperCamelCase__ , UpperCamelCase__ = self.round_robin(
self.ready_queue, self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
import doctest
__lowercase: str = Process("P1", 0, 53)
__lowercase: List[Any] = Process("P2", 0, 17)
__lowercase: Tuple = Process("P3", 0, 68)
__lowercase: Any = Process("P4", 0, 24)
__lowercase: Optional[int] = 3
__lowercase: Optional[int] = [17, 25]
__lowercase: Any = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={"queue": deque([Pa, Pa, Pa, Pa])})
__lowercase: int = Process("P1", 0, 53)
__lowercase: List[str] = Process("P2", 0, 17)
__lowercase: List[str] = Process("P3", 0, 68)
__lowercase: Dict = Process("P4", 0, 24)
__lowercase: Dict = 3
__lowercase: Dict = [17, 25]
__lowercase: Tuple = deque([Pa, Pa, Pa, Pa])
__lowercase: Dict = MLFQ(number_of_queues, time_slices, queue, 0)
__lowercase: str = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"""waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"""completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"""turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"""
)
# print sequence of finished processes
print(
F"""sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"""
)
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowercase: Dict = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = 'deberta-v2'
def __init__( self : Optional[int], a_ : Tuple=12_8100, a_ : List[str]=1536, a_ : List[str]=24, a_ : Tuple=24, a_ : Tuple=6144, a_ : Optional[Any]="gelu", a_ : Optional[int]=0.1, a_ : Union[str, Any]=0.1, a_ : Tuple=512, a_ : Dict=0, a_ : Optional[Any]=0.02, a_ : int=1e-7, a_ : Dict=False, a_ : int=-1, a_ : Dict=0, a_ : int=True, a_ : List[Any]=None, a_ : Optional[Any]=0, a_ : List[str]="gelu", **a_ : Union[str, Any], ):
"""simple docstring"""
super().__init__(**_lowerCAmelCase )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = relative_attention
UpperCamelCase__ = max_relative_positions
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = position_biased_input
# Backwards compatibility
if type(_lowerCAmelCase ) == str:
UpperCamelCase__ = [x.strip() for x in pos_att_type.lower().split("|" )]
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = kwargs.get("pooler_hidden_size", _lowerCAmelCase )
UpperCamelCase__ = pooler_dropout
UpperCamelCase__ = pooler_hidden_act
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return 12
def lowercase_ ( self : Union[str, Any], a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], a_ : int = -1, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional["TensorType"] = None, a_ : int = 3, a_ : int = 40, a_ : int = 40, a_ : "PreTrainedTokenizerBase" = None, ):
"""simple docstring"""
UpperCamelCase__ = super().generate_dummy_inputs(preprocessor=_lowerCAmelCase, framework=_lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
UpperCamelCase__ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = mock.Mock()
UpperCamelCase__ = 500
UpperCamelCase__ = {}
UpperCamelCase__ = HTTPError
UpperCamelCase__ = {}
# Download this model to make sure it's in the cache.
UpperCamelCase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=_SCREAMING_SNAKE_CASE ) as mock_head:
UpperCamelCase__ = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
try:
UpperCamelCase__ = tempfile.mktemp()
with open(_SCREAMING_SNAKE_CASE, "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", _SCREAMING_SNAKE_CASE )
UpperCamelCase__ = AlbertTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
finally:
os.remove(_SCREAMING_SNAKE_CASE )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", _SCREAMING_SNAKE_CASE )
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def lowercase_ ( cls : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = TOKEN
HfFolder.save_token(_SCREAMING_SNAKE_CASE )
@classmethod
def lowercase_ ( cls : Union[str, Any] ):
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def lowercase_ ( self : List[str] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(_SCREAMING_SNAKE_CASE, "vocab.txt" )
with open(_SCREAMING_SNAKE_CASE, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(_SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE, repo_id="test-tokenizer", push_to_hub=_SCREAMING_SNAKE_CASE, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained(f'{USER}/test-tokenizer' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(_SCREAMING_SNAKE_CASE, "vocab.txt" )
with open(_SCREAMING_SNAKE_CASE, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizer(_SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_SCREAMING_SNAKE_CASE, repo_id="valid_org/test-tokenizer-org", push_to_hub=_SCREAMING_SNAKE_CASE, use_auth_token=self._token )
UpperCamelCase__ = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(_SCREAMING_SNAKE_CASE, "vocab.txt" )
with open(_SCREAMING_SNAKE_CASE, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = CustomTokenizer(_SCREAMING_SNAKE_CASE )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = os.path.join(_SCREAMING_SNAKE_CASE, "vocab.txt" )
with open(_SCREAMING_SNAKE_CASE, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
UpperCamelCase__ = BertTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
bert_tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = CustomTokenizerFast.from_pretrained(_SCREAMING_SNAKE_CASE )
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
UpperCamelCase__ = AutoTokenizer.from_pretrained(f'{USER}/test-dynamic-tokenizer', trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
UpperCamelCase__ = AutoTokenizer.from_pretrained(
f'{USER}/test-dynamic-tokenizer', use_fast=_SCREAMING_SNAKE_CASE, trust_remote_code=_SCREAMING_SNAKE_CASE )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = Trie()
UpperCamelCase__ = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
self.assertEqual(_SCREAMING_SNAKE_CASE, ["AB", "C"] )
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
import os
__lowercase: Optional[int] = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1_000}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = 0
while index < len(_UpperCamelCase ) - 1:
UpperCamelCase__ = SYMBOLS[numerals[index]]
UpperCamelCase__ = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = ''
UpperCamelCase__ = num // 10_00
numerals += m_count * "M"
num %= 10_00
UpperCamelCase__ = num // 1_00
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_00
UpperCamelCase__ = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "/p089_roman.txt" ) -> int:
'''simple docstring'''
UpperCamelCase__ = 0
with open(os.path.dirname(_UpperCamelCase ) + roman_numerals_filename ) as filea:
UpperCamelCase__ = filea.readlines()
for line in lines:
UpperCamelCase__ = line.strip()
UpperCamelCase__ = parse_roman_numerals(_UpperCamelCase )
UpperCamelCase__ = generate_roman_numerals(_UpperCamelCase )
savings += len(_UpperCamelCase ) - len(_UpperCamelCase )
return savings
if __name__ == "__main__":
print(F"""{solution() = }""")
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = image.size
UpperCamelCase__ = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
UpperCamelCase__ = image.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] )
UpperCamelCase__ = np.array(_UpperCamelCase ).astype(np.floataa ) / 2_5_5.0
UpperCamelCase__ = image[None].transpose(0 , 3 , 1 , 2 )
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
return 2.0 * image - 1.0
class UpperCAmelCase ( __lowerCAmelCase):
def __init__( self : Dict, a_ : VQModel, a_ : UNetaDModel, a_ : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
], ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=lowerCamelCase__, unet=lowerCamelCase__, scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : int, a_ : Union[torch.Tensor, PIL.Image.Image] = None, a_ : Optional[int] = 1, a_ : Optional[int] = 100, a_ : Optional[float] = 0.0, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : Optional[str] = "pil", a_ : bool = True, ):
"""simple docstring"""
if isinstance(lowerCamelCase__, PIL.Image.Image ):
UpperCamelCase__ = 1
elif isinstance(lowerCamelCase__, torch.Tensor ):
UpperCamelCase__ = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(lowerCamelCase__ )}' )
if isinstance(lowerCamelCase__, PIL.Image.Image ):
UpperCamelCase__ = preprocess(lowerCamelCase__ )
UpperCamelCase__ = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
UpperCamelCase__ = (batch_size, self.unet.config.in_channels // 2, height, width)
UpperCamelCase__ = next(self.unet.parameters() ).dtype
UpperCamelCase__ = randn_tensor(lowerCamelCase__, generator=lowerCamelCase__, device=self.device, dtype=lowerCamelCase__ )
UpperCamelCase__ = image.to(device=self.device, dtype=lowerCamelCase__ )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(lowerCamelCase__, device=self.device )
UpperCamelCase__ = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
UpperCamelCase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(lowerCamelCase__ ):
# concat latents and low resolution image in the channel dimension.
UpperCamelCase__ = torch.cat([latents, image], dim=1 )
UpperCamelCase__ = self.scheduler.scale_model_input(lowerCamelCase__, lowerCamelCase__ )
# predict the noise residual
UpperCamelCase__ = self.unet(lowerCamelCase__, lowerCamelCase__ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__, **lowerCamelCase__ ).prev_sample
# decode the image latents with the VQVAE
UpperCamelCase__ = self.vqvae.decode(lowerCamelCase__ ).sample
UpperCamelCase__ = torch.clamp(lowerCamelCase__, -1.0, 1.0 )
UpperCamelCase__ = image / 2 + 0.5
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
__lowercase: List[Any] = 0
__lowercase: Any = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowercase: str = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
__lowercase: int = tuple[int, int]
class UpperCAmelCase :
def __init__( self : List[str], a_ : int, a_ : int, a_ : int, a_ : int, a_ : int, a_ : Node | None, ):
"""simple docstring"""
UpperCamelCase__ = pos_x
UpperCamelCase__ = pos_y
UpperCamelCase__ = (pos_y, pos_x)
UpperCamelCase__ = goal_x
UpperCamelCase__ = goal_y
UpperCamelCase__ = g_cost
UpperCamelCase__ = parent
UpperCamelCase__ = self.calculate_heuristic()
UpperCamelCase__ = self.g_cost + self.h_cost
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.pos_x - self.goal_x
UpperCamelCase__ = self.pos_y - self.goal_y
if HEURISTIC == 1:
return abs(lowercase__ ) + abs(lowercase__ )
else:
return sqrt(dy**2 + dx**2 )
def __lt__( self : Dict, a_ : Node ):
"""simple docstring"""
return self.f_cost < other.f_cost
class UpperCAmelCase :
def __init__( self : str, a_ : TPosition, a_ : TPosition ):
"""simple docstring"""
UpperCamelCase__ = Node(start[1], start[0], goal[1], goal[0], 0, lowercase__ )
UpperCamelCase__ = Node(goal[1], goal[0], goal[1], goal[0], 9_9999, lowercase__ )
UpperCamelCase__ = [self.start]
UpperCamelCase__ = []
UpperCamelCase__ = False
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
UpperCamelCase__ = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
return self.retrace_path(lowercase__ )
self.closed_nodes.append(lowercase__ )
UpperCamelCase__ = self.get_successors(lowercase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
UpperCamelCase__ = self.open_nodes.pop(self.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(lowercase__ )
else:
self.open_nodes.append(lowercase__ )
return [self.start.pos]
def lowercase_ ( self : Union[str, Any], a_ : Node ):
"""simple docstring"""
UpperCamelCase__ = []
for action in delta:
UpperCamelCase__ = parent.pos_x + action[1]
UpperCamelCase__ = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(lowercase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
lowercase__, lowercase__, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, lowercase__, ) )
return successors
def lowercase_ ( self : Any, a_ : Node | None ):
"""simple docstring"""
UpperCamelCase__ = node
UpperCamelCase__ = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
UpperCamelCase__ = current_node.parent
path.reverse()
return path
class UpperCAmelCase :
def __init__( self : Optional[int], a_ : TPosition, a_ : TPosition ):
"""simple docstring"""
UpperCamelCase__ = AStar(lowercase__, lowercase__ )
UpperCamelCase__ = AStar(lowercase__, lowercase__ )
UpperCamelCase__ = False
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
self.fwd_astar.open_nodes.sort()
self.bwd_astar.open_nodes.sort()
UpperCamelCase__ = self.fwd_astar.open_nodes.pop(0 )
UpperCamelCase__ = self.bwd_astar.open_nodes.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
return self.retrace_bidirectional_path(
lowercase__, lowercase__ )
self.fwd_astar.closed_nodes.append(lowercase__ )
self.bwd_astar.closed_nodes.append(lowercase__ )
UpperCamelCase__ = current_bwd_node
UpperCamelCase__ = current_fwd_node
UpperCamelCase__ = {
self.fwd_astar: self.fwd_astar.get_successors(lowercase__ ),
self.bwd_astar: self.bwd_astar.get_successors(lowercase__ ),
}
for astar in [self.fwd_astar, self.bwd_astar]:
for child_node in successors[astar]:
if child_node in astar.closed_nodes:
continue
if child_node not in astar.open_nodes:
astar.open_nodes.append(lowercase__ )
else:
# retrieve the best current path
UpperCamelCase__ = astar.open_nodes.pop(
astar.open_nodes.index(lowercase__ ) )
if child_node.g_cost < better_node.g_cost:
astar.open_nodes.append(lowercase__ )
else:
astar.open_nodes.append(lowercase__ )
return [self.fwd_astar.start.pos]
def lowercase_ ( self : Tuple, a_ : Node, a_ : Node ):
"""simple docstring"""
UpperCamelCase__ = self.fwd_astar.retrace_path(lowercase__ )
UpperCamelCase__ = self.bwd_astar.retrace_path(lowercase__ )
bwd_path.pop()
bwd_path.reverse()
UpperCamelCase__ = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
__lowercase: int = (0, 0)
__lowercase: str = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowercase: Optional[Any] = time.time()
__lowercase: List[str] = AStar(init, goal)
__lowercase: Tuple = a_star.search()
__lowercase: Dict = time.time() - start_time
print(F"""AStar execution time = {end_time:f} seconds""")
__lowercase: Union[str, Any] = time.time()
__lowercase: str = BidirectionalAStar(init, goal)
__lowercase: List[Any] = time.time() - bd_start_time
print(F"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__lowercase: List[str] = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__lowercase: Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 0
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : int ):
"""simple docstring"""
debug_launcher(test_script.main )
def lowercase_ ( self : int ):
"""simple docstring"""
debug_launcher(test_ops.main )
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__lowercase: Tuple = logging.get_logger(__name__)
class UpperCAmelCase ( __lowercase):
def __init__( self : int, *a_ : Union[str, Any], **a_ : List[str] ):
"""simple docstring"""
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead.", UpperCAmelCase__, )
super().__init__(*UpperCAmelCase__, **UpperCAmelCase__ )
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
__lowercase: List[str] = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
__lowercase: Tuple = {value: key for key, value in MORSE_CODE_DICT.items()}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
return "".join(REVERSE_DICT[char] for char in message.split() )
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = """Morse code here!"""
print(__lowerCAmelCase )
UpperCamelCase__ = encrypt(__lowerCAmelCase )
print(__lowerCAmelCase )
UpperCamelCase__ = decrypt(__lowerCAmelCase )
print(__lowerCAmelCase )
if __name__ == "__main__":
main()
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
UpperCamelCase__ = 0.0_0
UpperCamelCase__ = 0
for resistor in resistors:
if resistor <= 0:
UpperCamelCase__ = F'Resistor at index {index} has a negative or zero value!'
raise ValueError(lowercase__ )
first_sum += 1 / float(lowercase__ )
index += 1
return 1 / first_sum
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = 0.0_0
UpperCamelCase__ = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
UpperCamelCase__ = F'Resistor at index {index} has a negative value!'
raise ValueError(lowercase__ )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False')) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env')
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 650, 'eval_accuracy': 0.6, 'eval_loss': 0.9},
},
{
'framework': 'tensorflow',
'script': 'run_tf.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.g4dn.xlarge',
'results': {'train_runtime': 600, 'eval_accuracy': 0.3, 'eval_loss': 0.9},
},
])
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : str ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
f'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split(), encoding="utf-8", check=_SCREAMING_SNAKE_CASE, )
assert hasattr(self, "env" )
def lowercase_ ( self : Tuple, a_ : Dict=1 ):
"""simple docstring"""
return HuggingFace(
entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f'{self.env.base_job_name}-single', instance_count=_SCREAMING_SNAKE_CASE, instance_type=self.instance_type, debugger_hook_config=_SCREAMING_SNAKE_CASE, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", )
def lowercase_ ( self : List[Any], a_ : Optional[int] ):
"""simple docstring"""
TrainingJobAnalytics(_SCREAMING_SNAKE_CASE ).export_csv(f'{self.env.test_path}/{job_name}_metrics.csv' )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCamelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'{estimator.latest_training_job.name}.json', "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, _SCREAMING_SNAKE_CASE )
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
import logging
from transformers.configuration_utils import PretrainedConfig
__lowercase: int = logging.getLogger(__name__)
class UpperCAmelCase ( __snake_case):
_lowerCamelCase : str = "masked_bert"
def __init__( self : Union[str, Any], a_ : Any=3_0522, a_ : Union[str, Any]=768, a_ : Optional[Any]=12, a_ : str=12, a_ : str=3072, a_ : int="gelu", a_ : List[str]=0.1, a_ : Union[str, Any]=0.1, a_ : List[str]=512, a_ : List[str]=2, a_ : str=0.02, a_ : Tuple=1e-1_2, a_ : Optional[Any]=0, a_ : Optional[int]="topK", a_ : List[str]="constant", a_ : Any=0.0, **a_ : List[str], ):
"""simple docstring"""
super().__init__(pad_token_id=lowerCamelCase_, **lowerCamelCase_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = pruning_method
UpperCamelCase__ = mask_init
UpperCamelCase__ = mask_scale
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Dict ) -> tuple[int | None, int | None, float]:
'''simple docstring'''
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCamelCase__ = (low + high) // 2
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = max_subarray(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = max_subarray(SCREAMING_SNAKE_CASE_ , mid + 1 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = max_cross_sum(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : int ) -> tuple[int, int, float]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = float("-inf" ), -1
UpperCamelCase__ , UpperCamelCase__ = float("-inf" ), -1
UpperCamelCase__ = 0
for i in range(SCREAMING_SNAKE_CASE_ , low - 1 , -1 ):
summ += arr[i]
if summ > left_sum:
UpperCamelCase__ = summ
UpperCamelCase__ = i
UpperCamelCase__ = 0
for i in range(mid + 1 , high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCamelCase__ = summ
UpperCamelCase__ = i
return max_left, max_right, (left_sum + right_sum)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> float:
'''simple docstring'''
UpperCamelCase__ = [randint(1 , SCREAMING_SNAKE_CASE_ ) for _ in range(SCREAMING_SNAKE_CASE_ )]
UpperCamelCase__ = time.time()
max_subarray(SCREAMING_SNAKE_CASE_ , 0 , input_size - 1 )
UpperCamelCase__ = time.time()
return end - start
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = [10, 1_00, 10_00, 1_00_00, 5_00_00, 10_00_00, 20_00_00, 30_00_00, 40_00_00, 50_00_00]
UpperCamelCase__ = [time_max_subarray(SCREAMING_SNAKE_CASE_ ) for input_size in input_sizes]
print("No of Inputs\t\tTime Taken" )
for input_size, runtime in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
print(SCREAMING_SNAKE_CASE_ , "\t\t" , SCREAMING_SNAKE_CASE_ )
plt.plot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
plt.xlabel("Number of Inputs" )
plt.ylabel("Time taken in seconds" )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
__lowercase: List[str] = pd.read_csv("sample_data.csv", header=None)
__lowercase: Tuple = df.shape[:1][0]
# If you're using some other dataset input the target column
__lowercase: Dict = df.iloc[:, 1:2]
__lowercase: Any = actual_data.values.reshape(len_data, 1)
__lowercase: Optional[Any] = MinMaxScaler().fit_transform(actual_data)
__lowercase: Optional[Any] = 10
__lowercase: Dict = 5
__lowercase: Union[str, Any] = 20
__lowercase: Tuple = len_data - periods * look_back
__lowercase: Any = actual_data[:division]
__lowercase: Dict = actual_data[division - look_back :]
__lowercase ,__lowercase: Dict = [], []
__lowercase ,__lowercase: Optional[int] = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
__lowercase: Dict = np.array(train_x)
__lowercase: Any = np.array(test_x)
__lowercase: Optional[Any] = np.array([list(i.ravel()) for i in train_y])
__lowercase: List[Any] = np.array([list(i.ravel()) for i in test_y])
__lowercase: Tuple = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="mean_squared_error", optimizer="adam")
__lowercase: str = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
__lowercase: Optional[int] = model.predict(x_test)
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 0
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: List[Any] = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class UpperCAmelCase ( _a):
_lowerCamelCase : Any = """bart"""
_lowerCamelCase : int = ["""past_key_values"""]
_lowerCamelCase : Optional[int] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : List[str], a_ : Optional[int]=5_0265, a_ : List[Any]=1024, a_ : Optional[int]=12, a_ : str=4096, a_ : Any=16, a_ : Dict=12, a_ : Tuple=4096, a_ : Optional[int]=16, a_ : Any=0.0, a_ : List[str]=0.0, a_ : Optional[Any]="gelu", a_ : Tuple=1024, a_ : List[Any]=0.1, a_ : Optional[int]=0.0, a_ : str=0.0, a_ : Optional[int]=0.02, a_ : Optional[int]=0.0, a_ : List[str]=False, a_ : Dict=True, a_ : Any=3, a_ : Optional[Any]=1, a_ : Any=0, a_ : Tuple=2, a_ : Dict=True, a_ : Tuple=2, a_ : int=2, **a_ : List[Any], ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = classifier_dropout
UpperCamelCase__ = use_cache
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__lowerCamelCase, pad_token_id=__lowerCamelCase, bos_token_id=__lowerCamelCase, eos_token_id=__lowerCamelCase, is_encoder_decoder=__lowerCamelCase, decoder_start_token_id=__lowerCamelCase, forced_eos_token_id=__lowerCamelCase, **__lowerCamelCase, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", __lowerCamelCase ):
UpperCamelCase__ = self.bos_token_id
warnings.warn(
f'Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '
"The config can simply be saved and uploaded again to be fixed." )
class UpperCAmelCase ( _a):
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase__ = {0: """batch"""}
UpperCamelCase__ = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
UpperCamelCase__ = {0: """batch""", 1: """decoder_sequence"""}
UpperCamelCase__ = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(__lowerCamelCase, direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
UpperCamelCase__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase__ = self.num_layers
for i in range(__lowerCamelCase ):
UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""}
else:
UpperCamelCase__ = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ = super().outputs
else:
UpperCamelCase__ = super(__lowerCamelCase, self ).outputs
if self.use_past:
UpperCamelCase__ = self.num_layers
for i in range(__lowerCamelCase ):
UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""}
UpperCamelCase__ = {0: """batch""", 2: """past_sequence + sequence"""}
return common_outputs
def lowercase_ ( self : Optional[Any], a_ : PreTrainedTokenizer, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional[TensorType] = None, ):
"""simple docstring"""
UpperCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Generate decoder inputs
UpperCamelCase__ = seq_length if not self.use_past else 1
UpperCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
UpperCamelCase__ = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
UpperCamelCase__ = dict(**__lowerCamelCase, **__lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase__ = common_inputs["""input_ids"""].shape
UpperCamelCase__ = common_inputs["""decoder_input_ids"""].shape[1]
UpperCamelCase__ = self.num_attention_heads
UpperCamelCase__ = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ = decoder_seq_length + 3
UpperCamelCase__ = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
UpperCamelCase__ = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__lowerCamelCase, __lowerCamelCase )], dim=1 )
UpperCamelCase__ = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
UpperCamelCase__ = self.num_layers
UpperCamelCase__ = min(__lowerCamelCase, __lowerCamelCase )
UpperCamelCase__ = max(__lowerCamelCase, __lowerCamelCase ) - min_num_layers
UpperCamelCase__ = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder"""
for _ in range(__lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
torch.zeros(__lowerCamelCase ),
) )
# TODO: test this.
UpperCamelCase__ = encoder_shape if remaining_side_name == """encoder""" else decoder_shape
for _ in range(__lowerCamelCase, __lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) )
return common_inputs
def lowercase_ ( self : int, a_ : PreTrainedTokenizer, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional[TensorType] = None, ):
"""simple docstring"""
UpperCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
UpperCamelCase__ = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
UpperCamelCase__ = seqlen + 2
UpperCamelCase__ = self.num_layers
UpperCamelCase__ = self.num_attention_heads
UpperCamelCase__ = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
UpperCamelCase__ = common_inputs["""attention_mask"""].dtype
UpperCamelCase__ = torch.cat(
[common_inputs["attention_mask"], torch.ones(__lowerCamelCase, __lowerCamelCase, dtype=__lowerCamelCase )], dim=1 )
UpperCamelCase__ = [
(torch.zeros(__lowerCamelCase ), torch.zeros(__lowerCamelCase )) for _ in range(__lowerCamelCase )
]
return common_inputs
def lowercase_ ( self : Tuple, a_ : PreTrainedTokenizer, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional[TensorType] = None, ):
"""simple docstring"""
UpperCamelCase__ = compute_effective_axis_dimension(
__lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
UpperCamelCase__ = tokenizer.num_special_tokens_to_add(__lowerCamelCase )
UpperCamelCase__ = compute_effective_axis_dimension(
__lowerCamelCase, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=__lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
UpperCamelCase__ = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size
UpperCamelCase__ = dict(tokenizer(__lowerCamelCase, return_tensors=__lowerCamelCase ) )
return common_inputs
def lowercase_ ( self : Optional[int], a_ : PreTrainedTokenizer, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional[TensorType] = None, ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase )
elif self.task == "causal-lm":
UpperCamelCase__ = self._generate_dummy_inputs_for_causal_lm(
__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase )
else:
UpperCamelCase__ = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__lowerCamelCase, batch_size=__lowerCamelCase, seq_length=__lowerCamelCase, is_pair=__lowerCamelCase, framework=__lowerCamelCase )
return common_inputs
def lowercase_ ( self : List[str], a_ : List[str], a_ : List[Any], a_ : Union[str, Any], a_ : Dict ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
UpperCamelCase__ = super()._flatten_past_key_values_(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
else:
UpperCamelCase__ = super(__lowerCamelCase, self )._flatten_past_key_values_(
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple = 1_00 , ) -> float:
'''simple docstring'''
UpperCamelCase__ = x_start
UpperCamelCase__ = fnc(a__ )
UpperCamelCase__ = 0.0
for _ in range(a__ ):
# Approximates curve as a sequence of linear lines and sums their length
UpperCamelCase__ = (x_end - x_start) / steps + xa
UpperCamelCase__ = fnc(a__ )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
UpperCamelCase__ = xa
UpperCamelCase__ = fxa
return length
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
return math.sin(10 * x )
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
__lowercase: Union[str, Any] = 10
while i <= 100_000:
print(F"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
__lowercase: Optional[Any] = {
"configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = [
"LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"LongT5EncoderModel",
"LongT5ForConditionalGeneration",
"LongT5Model",
"LongT5PreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = [
"FlaxLongT5ForConditionalGeneration",
"FlaxLongT5Model",
"FlaxLongT5PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_longta import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongTaConfig, LongTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longta import (
LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
LongTaEncoderModel,
LongTaForConditionalGeneration,
LongTaModel,
LongTaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_longta import (
FlaxLongTaForConditionalGeneration,
FlaxLongTaModel,
FlaxLongTaPreTrainedModel,
)
else:
import sys
__lowercase: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] = 4 ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = abs(_UpperCamelCase ) or 4
return [[1 + x + y * row_size for x in range(_UpperCamelCase )] for y in range(_UpperCamelCase )]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
return reverse_row(transpose(_UpperCamelCase ) )
# OR.. transpose(reverse_column(matrix))
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
return reverse_row(reverse_column(_UpperCamelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
return reverse_column(transpose(_UpperCamelCase ) )
# OR.. transpose(reverse_row(matrix))
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = [list(_UpperCamelCase ) for x in zip(*_UpperCamelCase )]
return matrix
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = matrix[::-1]
return matrix
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ = [x[::-1] for x in matrix]
return matrix
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for i in matrix:
print(*_UpperCamelCase )
if __name__ == "__main__":
__lowercase: Tuple = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 90 counterclockwise:\n")
print_matrix(rotate_aa(matrix))
__lowercase: Union[str, Any] = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 180:\n")
print_matrix(rotate_aaa(matrix))
__lowercase: int = make_matrix()
print("\norigin:\n")
print_matrix(matrix)
print("\nrotate 270 counterclockwise:\n")
print_matrix(rotate_aaa(matrix))
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
__lowercase: Dict = True
except (ImportError, AttributeError):
__lowercase: Any = object
def SCREAMING_SNAKE_CASE__( *_UpperCamelCase : Union[str, Any] , **_UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
pass
__lowercase: Optional[int] = False
__lowercase: int = logging.get_logger("transformers-cli/serving")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Namespace ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
return ServeCommand(lowerCamelCase__ , args.host , args.port , args.workers )
class UpperCAmelCase ( a_):
_lowerCamelCase : dict
class UpperCAmelCase ( a_):
_lowerCamelCase : List[str]
_lowerCamelCase : Optional[List[int]]
class UpperCAmelCase ( a_):
_lowerCamelCase : str
class UpperCAmelCase ( a_):
_lowerCamelCase : Any
class UpperCAmelCase ( a_):
@staticmethod
def lowercase_ ( a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = parser.add_parser(
"serve", help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task", type=a_, choices=get_supported_tasks(), help="The task to run the pipeline on", )
serve_parser.add_argument("--host", type=a_, default="localhost", help="Interface the server will listen on." )
serve_parser.add_argument("--port", type=a_, default=8888, help="Port the serving will listen to." )
serve_parser.add_argument("--workers", type=a_, default=1, help="Number of http workers" )
serve_parser.add_argument("--model", type=a_, help="Model's name or path to stored model." )
serve_parser.add_argument("--config", type=a_, help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer", type=a_, help="Tokenizer name to use." )
serve_parser.add_argument(
"--device", type=a_, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", )
serve_parser.set_defaults(func=a_ )
def __init__( self : Tuple, a_ : Any, a_ : Union[str, Any], a_ : int, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = pipeline
UpperCamelCase__ = host
UpperCamelCase__ = port
UpperCamelCase__ = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(f'Serving model over {host}:{port}' )
UpperCamelCase__ = FastAPI(
routes=[
APIRoute(
"/", self.model_info, response_model=a_, response_class=a_, methods=["GET"], ),
APIRoute(
"/tokenize", self.tokenize, response_model=a_, response_class=a_, methods=["POST"], ),
APIRoute(
"/detokenize", self.detokenize, response_model=a_, response_class=a_, methods=["POST"], ),
APIRoute(
"/forward", self.forward, response_model=a_, response_class=a_, methods=["POST"], ),
], timeout=600, )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
run(self._app, host=self.host, port=self.port, workers=self.workers )
def lowercase_ ( self : str ):
"""simple docstring"""
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def lowercase_ ( self : Optional[Any], a_ : Tuple = Body(a_, embed=a_ ), a_ : str = Body(a_, embed=a_ ) ):
"""simple docstring"""
try:
UpperCamelCase__ = self._pipeline.tokenizer.tokenize(a_ )
if return_ids:
UpperCamelCase__ = self._pipeline.tokenizer.convert_tokens_to_ids(a_ )
return ServeTokenizeResult(tokens=a_, tokens_ids=a_ )
else:
return ServeTokenizeResult(tokens=a_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(a_ )} )
def lowercase_ ( self : Optional[Any], a_ : int = Body(a_, embed=a_ ), a_ : Optional[int] = Body(a_, embed=a_ ), a_ : Any = Body(a_, embed=a_ ), ):
"""simple docstring"""
try:
UpperCamelCase__ = self._pipeline.tokenizer.decode(a_, a_, a_ )
return ServeDeTokenizeResult(model="", text=a_ )
except Exception as e:
raise HTTPException(status_code=500, detail={"model": "", "error": str(a_ )} )
async def lowercase_ ( self : Dict, a_ : int=Body(a_, embed=a_ ) ):
"""simple docstring"""
if len(a_ ) == 0:
return ServeForwardResult(output=[], attention=[] )
try:
# Forward through the model
UpperCamelCase__ = self._pipeline(a_ )
return ServeForwardResult(output=a_ )
except Exception as e:
raise HTTPException(500, {"error": str(a_ )} )
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: Tuple = logging.get_logger()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : str , _UpperCamelCase : LevitConfig , _UpperCamelCase : Path , _UpperCamelCase : bool = True ) -> Dict:
'''simple docstring'''
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
UpperCamelCase__ = timm.create_model("levit_128s" , pretrained=_SCREAMING_SNAKE_CASE )
else:
UpperCamelCase__ = timm.create_model("levit_128" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 1_92:
UpperCamelCase__ = timm.create_model("levit_192" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 2_56:
UpperCamelCase__ = timm.create_model("levit_256" , pretrained=_SCREAMING_SNAKE_CASE )
if hidden_sizes == 3_84:
UpperCamelCase__ = timm.create_model("levit_384" , pretrained=_SCREAMING_SNAKE_CASE )
from_model.eval()
UpperCamelCase__ = LevitForImageClassificationWithTeacher(_SCREAMING_SNAKE_CASE ).eval()
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = from_model.state_dict()
UpperCamelCase__ = list(from_model.state_dict().keys() )
UpperCamelCase__ = list(our_model.state_dict().keys() )
print(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for i in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = weights[og_keys[i]]
our_model.load_state_dict(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = torch.randn((2, 3, 2_24, 2_24) )
UpperCamelCase__ = from_model(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = our_model(_SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
UpperCamelCase__ = name
print(_SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
UpperCamelCase__ = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Path , _UpperCamelCase : str = None , _UpperCamelCase : bool = True ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = "imagenet-1k-id2label.json"
UpperCamelCase__ = 10_00
UpperCamelCase__ = (1, num_labels)
UpperCamelCase__ = "huggingface/label-files"
UpperCamelCase__ = num_labels
UpperCamelCase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
UpperCamelCase__ = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
"levit-128S": 1_28,
"levit-128": 1_28,
"levit-192": 1_92,
"levit-256": 2_56,
"levit-384": 3_84,
}
UpperCamelCase__ = {
"levit-128S": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-128": ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"levit-192": ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-256": ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"levit-384": ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return config, expected_shape
if __name__ == "__main__":
__lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
__lowercase: List[Any] = parser.parse_args()
__lowercase: Tuple = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[Any], a_ : Optional[Any]=7, a_ : str=3, a_ : Union[str, Any]=18, a_ : int=30, a_ : Union[str, Any]=400, a_ : List[Any]=True, a_ : str=None, a_ : int=True, a_ : List[str]=None, a_ : Optional[int]=True, a_ : Union[str, Any]=[0.5, 0.5, 0.5], a_ : str=[0.5, 0.5, 0.5], a_ : List[Any]=False, ):
"""simple docstring"""
UpperCamelCase__ = size if size is not None else {'''height''': 20, '''width''': 20}
UpperCamelCase__ = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_center_crop
UpperCamelCase__ = crop_size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = do_reduce_labels
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def SCREAMING_SNAKE_CASE__ ( ) -> str:
'''simple docstring'''
UpperCamelCase__ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCamelCase__ = Image.open(dataset[0]["file"] )
UpperCamelCase__ = Image.open(dataset[1]["file"] )
return image, map
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
UpperCamelCase__ = Image.open(ds[0]["file"] )
UpperCamelCase__ = Image.open(ds[1]["file"] )
UpperCamelCase__ = Image.open(ds[2]["file"] )
UpperCamelCase__ = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = BeitImageProcessor if is_vision_available() else None
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = BeitImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, "do_resize" ) )
self.assertTrue(hasattr(a_, "size" ) )
self.assertTrue(hasattr(a_, "do_center_crop" ) )
self.assertTrue(hasattr(a_, "center_crop" ) )
self.assertTrue(hasattr(a_, "do_normalize" ) )
self.assertTrue(hasattr(a_, "image_mean" ) )
self.assertTrue(hasattr(a_, "image_std" ) )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"height": 20, "width": 20} )
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18} )
self.assertEqual(image_processor.do_reduce_labels, a_ )
UpperCamelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict, size=42, crop_size=84, reduce_labels=a_ )
self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84} )
self.assertEqual(image_processor.do_reduce_labels, a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
UpperCamelCase__ = []
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], maps[0], return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched
UpperCamelCase__ = image_processing(a_, a_, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test not batched input (PIL images)
UpperCamelCase__ = prepare_semantic_single_inputs()
UpperCamelCase__ = image_processing(a_, a_, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
# Test batched input (PIL images)
UpperCamelCase__ = prepare_semantic_batch_inputs()
UpperCamelCase__ = image_processing(a_, a_, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
UpperCamelCase__ = prepare_semantic_single_inputs()
UpperCamelCase__ = image_processing(a_, a_, return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 150 )
UpperCamelCase__ = True
UpperCamelCase__ = image_processing(a_, a_, return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 255 )
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
__lowercase: List[Any] = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase)
class UpperCAmelCase ( _lowerCamelCase):
def __init__( self : Optional[int], *a_ : Any, **a_ : List[Any] ):
"""simple docstring"""
super().__init__(*a_, **a_ )
self.check_model_type(a_ )
def lowercase_ ( self : Optional[int], a_ : Any=None, a_ : str=None, a_ : Union[str, Any]=None, **a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = {}, {}
if padding is not None:
UpperCamelCase__ = padding
if truncation is not None:
UpperCamelCase__ = truncation
if top_k is not None:
UpperCamelCase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : List[Any], a_ : Union["Image.Image", str], a_ : str = None, **a_ : Dict ):
"""simple docstring"""
if isinstance(a_, (Image.Image, str) ) and isinstance(a_, a_ ):
UpperCamelCase__ = {"image": image, "question": question}
else:
UpperCamelCase__ = image
UpperCamelCase__ = super().__call__(a_, **a_ )
return results
def lowercase_ ( self : List[str], a_ : Optional[int], a_ : Optional[Any]=False, a_ : Any=False ):
"""simple docstring"""
UpperCamelCase__ = load_image(inputs["image"] )
UpperCamelCase__ = self.tokenizer(
inputs["question"], return_tensors=self.framework, padding=a_, truncation=a_ )
UpperCamelCase__ = self.image_processor(images=a_, return_tensors=self.framework )
model_inputs.update(a_ )
return model_inputs
def lowercase_ ( self : Union[str, Any], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.model(**a_ )
return model_outputs
def lowercase_ ( self : str, a_ : Optional[Any], a_ : Tuple=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase__ = model_outputs.logits.sigmoid()[0]
UpperCamelCase__ , UpperCamelCase__ = probs.topk(a_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
UpperCamelCase__ = scores.tolist()
UpperCamelCase__ = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_ )]
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase :
def __init__( self : int, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = str(id_ )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = []
UpperCamelCase__ = {} # {vertex:distance}
def __lt__( self : int, a_ : Dict ):
"""simple docstring"""
return self.key < other.key
def __repr__( self : Union[str, Any] ):
"""simple docstring"""
return self.id
def lowercase_ ( self : List[Any], a_ : List[str] ):
"""simple docstring"""
self.neighbors.append(a_ )
def lowercase_ ( self : Tuple, a_ : Dict, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = weight
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple , _UpperCamelCase : str ) -> Any:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCAmelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCAmelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : Vertex ) -> list:
UpperCamelCase__ = []
for u in graph:
UpperCamelCase__ = math.inf
UpperCamelCase__ = None
UpperCamelCase__ = 0
UpperCamelCase__ = graph[:]
while q:
UpperCamelCase__ = min(_lowerCAmelCase )
q.remove(_lowerCAmelCase )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCamelCase__ = u
UpperCamelCase__ = u.edges[v.id]
for i in range(1 , len(_lowerCAmelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : Vertex ) -> Iterator[tuple]:
for u in graph:
UpperCamelCase__ = math.inf
UpperCamelCase__ = None
UpperCamelCase__ = 0
UpperCamelCase__ = list(_lowerCAmelCase )
hq.heapify(_lowerCAmelCase )
while h:
UpperCamelCase__ = hq.heappop(_lowerCAmelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCamelCase__ = u
UpperCamelCase__ = u.edges[v.id]
hq.heapify(_lowerCAmelCase )
for i in range(1 , len(_lowerCAmelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def SCREAMING_SNAKE_CASE__( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
from math import factorial
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
UpperCamelCase__ = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
UpperCamelCase__ = float(factorial(_UpperCamelCase ) )
coefficient /= factorial(_UpperCamelCase ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__lowercase: int = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase):
@register_to_config
def __init__( self : Union[str, Any], a_ : bool, a_ : Optional[int] = None, a_ : Optional[int] = None ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
UpperCamelCase__ = torch.zeros(lowercase_, lowercase_ )
else:
UpperCamelCase__ = None
UpperCamelCase__ = torch.nn.Parameter(lowercase_ )
class UpperCAmelCase ( _lowerCamelCase):
_lowerCamelCase : Optional[Any] = 42
_lowerCamelCase : Union[str, Any] = 42
_lowerCamelCase : Union[str, Any] = 42
_lowerCamelCase : List[Any] = 42
_lowerCamelCase : List[Any] = 42
_lowerCamelCase : Optional[Any] = 42
def __init__( self : int, a_ : VQModel, a_ : CLIPTextModel, a_ : CLIPTokenizer, a_ : TransformeraDModel, a_ : VQDiffusionScheduler, a_ : LearnedClassifierFreeSamplingEmbeddings, ):
"""simple docstring"""
super().__init__()
self.register_modules(
vqvae=lowercase_, transformer=lowercase_, text_encoder=lowercase_, tokenizer=lowercase_, scheduler=lowercase_, learned_classifier_free_sampling_embeddings=lowercase_, )
def lowercase_ ( self : Any, a_ : Any, a_ : int, a_ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = len(lowercase_ ) if isinstance(lowercase_, lowercase_ ) else 1
# get prompt text embeddings
UpperCamelCase__ = self.tokenizer(
lowercase_, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt", )
UpperCamelCase__ = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
UpperCamelCase__ = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f' {self.tokenizer.model_max_length} tokens: {removed_text}' )
UpperCamelCase__ = text_input_ids[:, : self.tokenizer.model_max_length]
UpperCamelCase__ = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
UpperCamelCase__ = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=lowercase_ )
# duplicate text embeddings for each generation per prompt
UpperCamelCase__ = prompt_embeds.repeat_interleave(lowercase_, dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
UpperCamelCase__ = self.learned_classifier_free_sampling_embeddings.embeddings
UpperCamelCase__ = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase_, 1, 1 )
else:
UpperCamelCase__ = [""] * batch_size
UpperCamelCase__ = text_input_ids.shape[-1]
UpperCamelCase__ = self.tokenizer(
lowercase_, padding="max_length", max_length=lowercase_, truncation=lowercase_, return_tensors="pt", )
UpperCamelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
UpperCamelCase__ = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=lowercase_ )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
UpperCamelCase__ = negative_prompt_embeds.shape[1]
UpperCamelCase__ = negative_prompt_embeds.repeat(1, lowercase_, 1 )
UpperCamelCase__ = negative_prompt_embeds.view(batch_size * num_images_per_prompt, lowercase_, -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
UpperCamelCase__ = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : Union[str, List[str]], a_ : int = 100, a_ : float = 5.0, a_ : float = 1.0, a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : Optional[torch.FloatTensor] = None, a_ : Optional[str] = "pil", a_ : bool = True, a_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None, a_ : int = 1, ):
"""simple docstring"""
if isinstance(lowercase_, lowercase_ ):
UpperCamelCase__ = 1
elif isinstance(lowercase_, lowercase_ ):
UpperCamelCase__ = len(lowercase_ )
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(lowercase_ )}' )
UpperCamelCase__ = batch_size * num_images_per_prompt
UpperCamelCase__ = guidance_scale > 1.0
UpperCamelCase__ = self._encode_prompt(lowercase_, lowercase_, lowercase_ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_, lowercase_ ) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(lowercase_ )}.' )
# get the initial completely masked latents unless the user supplied it
UpperCamelCase__ = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
UpperCamelCase__ = self.transformer.num_vector_embeds - 1
UpperCamelCase__ = torch.full(lowercase_, lowercase_ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
f' {self.transformer.num_vector_embeds - 1} (inclusive).' )
UpperCamelCase__ = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase_, device=self.device )
UpperCamelCase__ = self.scheduler.timesteps.to(self.device )
UpperCamelCase__ = latents
for i, t in enumerate(self.progress_bar(lowercase_ ) ):
# expand the sample if we are doing classifier free guidance
UpperCamelCase__ = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
UpperCamelCase__ = self.transformer(lowercase_, encoder_hidden_states=lowercase_, timestep=lowercase_ ).sample
if do_classifier_free_guidance:
UpperCamelCase__ , UpperCamelCase__ = model_output.chunk(2 )
UpperCamelCase__ = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowercase_, dim=1, keepdim=lowercase_ )
UpperCamelCase__ = self.truncate(lowercase_, lowercase_ )
# remove `log(0)`'s (`-inf`s)
UpperCamelCase__ = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(lowercase_, timestep=lowercase_, sample=lowercase_, generator=lowercase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_, lowercase_, lowercase_ )
UpperCamelCase__ = self.vqvae.config.vq_embed_dim
UpperCamelCase__ = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
UpperCamelCase__ = self.vqvae.quantize.get_codebook_entry(lowercase_, shape=lowercase_ )
UpperCamelCase__ = self.vqvae.decode(lowercase_, force_not_quantize=lowercase_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(lowercase_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase_ )
def lowercase_ ( self : str, a_ : torch.FloatTensor, a_ : float ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = torch.sort(lowercase_, 1, descending=lowercase_ )
UpperCamelCase__ = torch.exp(lowercase_ )
UpperCamelCase__ = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
UpperCamelCase__ = torch.full_like(keep_mask[:, 0:1, :], lowercase_ )
UpperCamelCase__ = torch.cat((all_true, keep_mask), dim=1 )
UpperCamelCase__ = keep_mask[:, :-1, :]
UpperCamelCase__ = keep_mask.gather(1, indices.argsort(1 ) )
UpperCamelCase__ = log_p_x_0.clone()
UpperCamelCase__ = -torch.inf # -inf = log(0)
return rv
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: Optional[Any] = logging.get_logger(__name__)
__lowercase: Optional[int] = {
'''microsoft/trocr-base-handwritten''': (
'''https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'''
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase ( A__):
_lowerCamelCase : Optional[Any] = 'trocr'
_lowerCamelCase : Union[str, Any] = ['past_key_values']
_lowerCamelCase : str = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Optional[int], a_ : Optional[Any]=5_0265, a_ : List[str]=1024, a_ : Any=12, a_ : List[str]=16, a_ : Optional[Any]=4096, a_ : Union[str, Any]="gelu", a_ : str=512, a_ : Optional[int]=0.1, a_ : str=0.0, a_ : List[str]=0.0, a_ : Optional[Any]=2, a_ : Optional[int]=0.02, a_ : Any=0.0, a_ : List[Any]=True, a_ : Any=False, a_ : Union[str, Any]=True, a_ : Optional[int]=True, a_ : Optional[Any]=1, a_ : Optional[Any]=0, a_ : Optional[int]=2, **a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = d_model
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = activation_function
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = init_std
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = use_cache
UpperCamelCase__ = scale_embedding
UpperCamelCase__ = use_learned_position_embeddings
UpperCamelCase__ = layernorm_embedding
super().__init__(
pad_token_id=lowerCamelCase__, bos_token_id=lowerCamelCase__, eos_token_id=lowerCamelCase__, decoder_start_token_id=lowerCamelCase__, **lowerCamelCase__, )
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
__lowercase: List[Any] = logging.get_logger(__name__)
__lowercase: str = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class UpperCAmelCase ( a_):
_lowerCamelCase : Any = 'dpt'
def __init__( self : List[Any], a_ : List[Any]=768, a_ : int=12, a_ : List[Any]=12, a_ : int=3072, a_ : List[Any]="gelu", a_ : Any=0.0, a_ : Union[str, Any]=0.0, a_ : Union[str, Any]=0.02, a_ : int=1e-1_2, a_ : Optional[int]=384, a_ : Dict=16, a_ : Union[str, Any]=3, a_ : Dict=False, a_ : int=True, a_ : Dict=[2, 5, 8, 11], a_ : Dict="project", a_ : List[Any]=[4, 2, 1, 0.5], a_ : str=[96, 192, 384, 768], a_ : int=256, a_ : Union[str, Any]=-1, a_ : Optional[Any]=False, a_ : Optional[Any]=True, a_ : str=0.4, a_ : List[str]=255, a_ : List[str]=0.1, a_ : int=[1, 1024, 24, 24], a_ : List[Any]=[0, 1], a_ : List[Any]=None, **a_ : Union[str, Any], ):
"""simple docstring"""
super().__init__(**lowercase_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info("Initializing the config with a `BiT` backbone." )
UpperCamelCase__ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
UpperCamelCase__ = BitConfig(**lowercase_ )
elif isinstance(lowercase_, lowercase_ ):
logger.info("Initializing the config with a `BiT` backbone." )
UpperCamelCase__ = BitConfig(**lowercase_ )
elif isinstance(lowercase_, lowercase_ ):
UpperCamelCase__ = backbone_config
else:
raise ValueError(
f'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.' )
UpperCamelCase__ = backbone_featmap_shape
UpperCamelCase__ = neck_ignore_stages
if readout_type != "project":
raise ValueError("Readout type must be \'project\' when using `DPT-hybrid` mode." )
else:
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = []
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError("Readout_type must be one of [\'ignore\', \'add\', \'project\']" )
UpperCamelCase__ = readout_type
UpperCamelCase__ = reassemble_factors
UpperCamelCase__ = neck_hidden_sizes
UpperCamelCase__ = fusion_hidden_size
UpperCamelCase__ = head_in_index
UpperCamelCase__ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
UpperCamelCase__ = use_auxiliary_head
UpperCamelCase__ = auxiliary_loss_weight
UpperCamelCase__ = semantic_loss_ignore_index
UpperCamelCase__ = semantic_classifier_dropout
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
UpperCamelCase__ = self.backbone_config.to_dict()
UpperCamelCase__ = self.__class__.model_type
return output
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: Union[str, Any] = logging.get_logger(__name__)
__lowercase: str = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
if k == "embeddings.weight":
return "shared.weight"
for parlai_name, hf_name in PATTERNS:
UpperCamelCase__ = k.replace(_lowercase , _lowercase )
if k.startswith("encoder" ):
UpperCamelCase__ = k.replace(".attn" , ".self_attn" )
UpperCamelCase__ = k.replace("norm1" , "self_attn_layer_norm" )
UpperCamelCase__ = k.replace("norm2" , "final_layer_norm" )
elif k.startswith("decoder" ):
UpperCamelCase__ = k.replace("norm1" , "self_attn_layer_norm" )
UpperCamelCase__ = k.replace("norm2" , "encoder_attn_layer_norm" )
UpperCamelCase__ = k.replace("norm3" , "final_layer_norm" )
return k
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Any:
'''simple docstring'''
UpperCamelCase__ = [
"model.encoder.layernorm_embedding.weight",
"model.encoder.layernorm_embedding.bias",
"model.decoder.layernorm_embedding.weight",
"model.decoder.layernorm_embedding.bias",
]
for k in keys:
UpperCamelCase__ = sd.pop(_lowercase )
UpperCamelCase__ = k.replace("layernorm_embedding" , "layer_norm" )
assert new_k not in sd
UpperCamelCase__ = v
__lowercase: Any = ["START"]
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = torch.load(_lowercase , map_location="cpu" )
UpperCamelCase__ = model["model"]
UpperCamelCase__ = BlenderbotConfig.from_json_file(_lowercase )
UpperCamelCase__ = BlenderbotForConditionalGeneration(_lowercase )
UpperCamelCase__ = m.model.state_dict().keys()
UpperCamelCase__ = []
UpperCamelCase__ = {}
for k, v in sd.items():
if k in IGNORE_KEYS:
continue
UpperCamelCase__ = rename_state_dict_key(_lowercase )
if new_k not in valid_keys:
failures.append([k, new_k] )
else:
UpperCamelCase__ = v
if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
rename_layernorm_keys(_lowercase )
m.model.load_state_dict(_lowercase , strict=_lowercase )
m.half()
m.save_pretrained(_lowercase )
if __name__ == "__main__":
__lowercase: Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
__lowercase: Union[str, Any] = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class UpperCAmelCase :
def __init__( self : List[Any], a_ : Dict, a_ : Dict=13, a_ : Optional[int]=7, a_ : int=True, a_ : Tuple=True, a_ : Any=True, a_ : str=True, a_ : Tuple=99, a_ : Dict=32, a_ : int=2, a_ : Tuple=4, a_ : List[Any]=37, a_ : Tuple="gelu", a_ : List[str]=0.1, a_ : Optional[int]=0.1, a_ : Dict=512, a_ : Optional[int]=16, a_ : int=2, a_ : str=0.02, a_ : Dict=False, a_ : Any=True, a_ : List[Any]="None", a_ : int=3, a_ : Optional[Any]=4, a_ : List[str]=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_input_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_labels
UpperCamelCase__ = num_choices
UpperCamelCase__ = relative_attention
UpperCamelCase__ = position_biased_input
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = scope
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
UpperCamelCase__ = None
if self.use_input_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
UpperCamelCase__ = DebertaVaConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, initializer_range=self.initializer_range, return_dict=_SCREAMING_SNAKE_CASE, )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : Any, a_ : Dict, a_ : Optional[Any], a_ : Tuple, a_ : Optional[int], a_ : Dict, a_ : Optional[Any], a_ : str ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCamelCase__ = [input_ids, input_mask]
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any], a_ : Optional[Any], a_ : Tuple, a_ : List[Any], a_ : Dict, a_ : Tuple, a_ : Union[str, Any], a_ : str ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaForMaskedLM(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self : Optional[int], a_ : List[str], a_ : int, a_ : Optional[Any], a_ : Optional[int], a_ : int, a_ : Tuple, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFDebertaVaForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Union[str, Any], a_ : Tuple, a_ : Optional[int], a_ : Dict, a_ : List[Any], a_ : Any, a_ : Dict, a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFDebertaVaForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def lowercase_ ( self : str, a_ : str, a_ : List[str], a_ : Optional[Any], a_ : int, a_ : List[Any], a_ : Optional[Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
UpperCamelCase__ = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
(
UpperCamelCase__
) = config_and_inputs
UpperCamelCase__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase):
_lowerCamelCase : Dict = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase : int = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Union[str, Any] = False
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=_SCREAMING_SNAKE_CASE, hidden_size=37 )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_tf
class UpperCAmelCase ( unittest.TestCase):
@unittest.skip(reason="Model not available yet" )
def lowercase_ ( self : int ):
"""simple docstring"""
pass
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = TFDebertaVaModel.from_pretrained("kamalkraj/deberta-v2-xlarge" )
UpperCamelCase__ = tf.constant([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase__ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
UpperCamelCase__ = model(_SCREAMING_SNAKE_CASE, attention_mask=_SCREAMING_SNAKE_CASE )[0]
UpperCamelCase__ = tf.constant(
[[[0.2_356, 0.1_948, 0.0_369], [-0.1_063, 0.3_586, -0.5_152], [-0.6_399, -0.0_259, -0.2_525]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4], _SCREAMING_SNAKE_CASE, atol=1e-4 )
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = set({"(", "[", "{"} )
UpperCamelCase__ = set({")", "]", "}"} )
UpperCamelCase__ = {"{": "}", "[": "]", "(": ")"}
for i in range(len(_a ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(_a ) == 0 or (len(_a ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(_a ) == 0
def SCREAMING_SNAKE_CASE__( ) -> int:
'''simple docstring'''
UpperCamelCase__ = input("Enter sequence of brackets: " )
if is_balanced(_a ):
print(_a , "is balanced" )
else:
print(_a , "is not balanced" )
if __name__ == "__main__":
main()
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] = False ) -> float:
'''simple docstring'''
if not arr:
return 0
UpperCamelCase__ = 0 if allow_empty_subarrays else float("-inf" )
UpperCamelCase__ = 0.0
for num in arr:
UpperCamelCase__ = max(0 if allow_empty_subarrays else num , curr_sum + num )
UpperCamelCase__ = max(__lowerCAmelCase , __lowerCAmelCase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
__lowercase: str = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
__lowercase = get_logger(__name__)
class UpperCAmelCase ( enum.Enum):
_lowerCamelCase : Optional[Any] = """all_checks"""
_lowerCamelCase : Dict = """basic_checks"""
_lowerCamelCase : Dict = """no_checks"""
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[dict] , _UpperCamelCase : dict , _UpperCamelCase : Any=None ) -> Dict:
'''simple docstring'''
if expected_checksums is None:
logger.info("Unable to verify checksums." )
return
if len(set(_lowercase ) - set(_lowercase ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(_lowercase ) - set(_lowercase ) ) )
if len(set(_lowercase ) - set(_lowercase ) ) > 0:
raise UnexpectedDownloadedFile(str(set(_lowercase ) - set(_lowercase ) ) )
UpperCamelCase__ = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
UpperCamelCase__ = " for " + verification_name if verification_name is not None else ""
if len(_lowercase ) > 0:
raise NonMatchingChecksumError(
F'Checksums didn\'t match{for_verification_name}:\n'
F'{bad_urls}\n'
"Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" )
logger.info("All the checksums matched successfully" + for_verification_name )
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
class UpperCAmelCase ( a__):
pass
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[dict] , _UpperCamelCase : dict ) -> Dict:
'''simple docstring'''
if expected_splits is None:
logger.info("Unable to verify splits sizes." )
return
if len(set(_lowercase ) - set(_lowercase ) ) > 0:
raise ExpectedMoreSplits(str(set(_lowercase ) - set(_lowercase ) ) )
if len(set(_lowercase ) - set(_lowercase ) ) > 0:
raise UnexpectedSplits(str(set(_lowercase ) - set(_lowercase ) ) )
UpperCamelCase__ = [
{"expected": expected_splits[name], "recorded": recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(_lowercase ) > 0:
raise NonMatchingSplitsSizesError(str(_lowercase ) )
logger.info("All the splits matched successfully." )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : bool = True ) -> dict:
'''simple docstring'''
if record_checksum:
UpperCamelCase__ = shaaaa()
with open(_lowercase , "rb" ) as f:
for chunk in iter(lambda: f.read(1 << 20 ) , B"" ):
m.update(_lowercase )
UpperCamelCase__ = m.hexdigest()
else:
UpperCamelCase__ = None
return {"num_bytes": os.path.getsize(_lowercase ), "checksum": checksum}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Any:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> bool:
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__lowercase: str = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
__lowercase: int = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = _TestCommandArgs(dataset=snake_case__ , all_configs=snake_case__ , save_infos=snake_case__ )
UpperCamelCase__ = TestCommand(*snake_case__ )
test_command.run()
UpperCamelCase__ = os.path.join(snake_case__ , "README.md" )
assert os.path.exists(snake_case__ )
UpperCamelCase__ = DatasetInfosDict.from_directory(snake_case__ )
UpperCamelCase__ = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_35_15_63,
"num_examples": 1_00_00,
},
{
"name": "validation",
"num_bytes": 23_84_18,
"num_examples": 10_00,
},
] , download_size=3_94_06_80 , dataset_size=2_58_99_81 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
UpperCamelCase__ , UpperCamelCase__ = getattr(dataset_infos["default"] , snake_case__ ), getattr(expected_dataset_infos["default"] , snake_case__ )
if key == "num_bytes":
assert is_apercent_close(snake_case__ , snake_case__ )
elif key == "splits":
assert list(snake_case__ ) == list(snake_case__ )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10_00 ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = -1
UpperCamelCase__ = 0
for a in range(1 , n // 3 ):
# Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
UpperCamelCase__ = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCamelCase__ = n - a - b
if c * c == (a * a + b * b):
UpperCamelCase__ = a * b * c
if candidate >= product:
UpperCamelCase__ = candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
__lowercase: Any = numpy.array([0, 0])
__lowercase: Dict = numpy.array([0.5, 0.8_6_6_0_2_5_4])
__lowercase: int = numpy.array([1, 0])
__lowercase: str = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] ) -> list[numpy.ndarray]:
'''simple docstring'''
UpperCamelCase__ = initial_vectors
for _ in range(SCREAMING_SNAKE_CASE__ ):
UpperCamelCase__ = iteration_step(SCREAMING_SNAKE_CASE__ )
return vectors
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> list[numpy.ndarray]:
'''simple docstring'''
UpperCamelCase__ = []
for i, start_vector in enumerate(vectors[:-1] ):
UpperCamelCase__ = vectors[i + 1]
new_vectors.append(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] ) -> numpy.ndarray:
'''simple docstring'''
UpperCamelCase__ = numpy.radians(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = numpy.cos(SCREAMING_SNAKE_CASE__ ), numpy.sin(SCREAMING_SNAKE_CASE__ )
UpperCamelCase__ = numpy.array(((c, -s), (s, c)) )
return numpy.dot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> None:
'''simple docstring'''
UpperCamelCase__ = plt.gca()
axes.set_aspect("equal" )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
UpperCamelCase__ = zip(*SCREAMING_SNAKE_CASE__ )
plt.plot(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Any = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 0
|
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
__lowercase: Dict = "path-to-your-trained-model"
__lowercase: Tuple = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to("cuda")
__lowercase: Union[str, Any] = "A photo of sks dog in a bucket"
__lowercase: List[Any] = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
sample_size=(32, 64), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return model
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDConditionModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=10, )
return model
@property
def lowercase_ ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = AutoencoderKL(
sample_size=(128, 64), in_channels=1, out_channels=1, latent_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), )
UpperCamelCase__ = UNetaDModel(
sample_size=(64, 32), in_channels=1, out_channels=1, layers_per_block=2, block_out_channels=(128, 128), down_block_types=("AttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "AttnUpBlock2D"), )
return vqvae, unet
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCamelCase__ = Mel(
x_res=self.dummy_unet.config.sample_size[1], y_res=self.dummy_unet.config.sample_size[0], )
UpperCamelCase__ = DDPMScheduler()
UpperCamelCase__ = AudioDiffusionPipeline(vqvae=UpperCamelCase__, unet=self.dummy_unet, mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, steps=4 )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, steps=4, return_dict=UpperCamelCase__ )
UpperCamelCase__ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.frombuffer(image_from_tuple.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1], y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0], )
UpperCamelCase__ = DDIMScheduler()
UpperCamelCase__ = self.dummy_vqvae_and_unet
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=dummy_vqvae_and_unet[1], mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ = np.random.uniform(-1, 1, ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(raw_audio=UpperCamelCase__, generator=UpperCamelCase__, start_step=5, steps=10 )
UpperCamelCase__ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCamelCase__ = self.dummy_unet_condition
UpperCamelCase__ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0], unet=UpperCamelCase__, mel=UpperCamelCase__, scheduler=UpperCamelCase__ )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
np.random.seed(0 )
UpperCamelCase__ = torch.rand((1, 1, 10) )
UpperCamelCase__ = pipe(generator=UpperCamelCase__, encoding=UpperCamelCase__ )
UpperCamelCase__ = output.images[0]
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = torch_device
UpperCamelCase__ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
UpperCamelCase__ = pipe.to(UpperCamelCase__ )
pipe.set_progress_bar_config(disable=UpperCamelCase__ )
UpperCamelCase__ = torch.Generator(device=UpperCamelCase__ ).manual_seed(42 )
UpperCamelCase__ = pipe(generator=UpperCamelCase__ )
UpperCamelCase__ = output.audios[0]
UpperCamelCase__ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCamelCase__ = np.frombuffer(image.tobytes(), dtype="uint8" )[:10]
UpperCamelCase__ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowercase: List[str] = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__lowercase: Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class UpperCAmelCase ( _a , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = PriorTransformer
_lowerCamelCase : List[Any] = """hidden_states"""
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = 4
UpperCamelCase__ = 8
UpperCamelCase__ = 7
UpperCamelCase__ = floats_tensor((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = floats_tensor((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = floats_tensor((batch_size, num_embeddings, embedding_dim) ).to(__lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : Any, a_ : List[str]=0 ):
"""simple docstring"""
torch.manual_seed(__lowerCAmelCase )
UpperCamelCase__ = 4
UpperCamelCase__ = 8
UpperCamelCase__ = 7
UpperCamelCase__ = torch.randn((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = torch.randn((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return (4, 8)
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return (4, 8)
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = {
"num_attention_heads": 2,
"attention_head_dim": 4,
"num_layers": 2,
"embedding_dim": 8,
"num_embeddings": 7,
"additional_embeddings": 4,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = PriorTransformer.from_pretrained(
"hf-internal-testing/prior-dummy", output_loading_info=__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(__lowerCAmelCase )
UpperCamelCase__ = model(**self.dummy_input )[0]
assert hidden_states is not None, "Make sure output is not None"
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.prepare_init_args_and_inputs_for_common()
UpperCamelCase__ = self.model_class(**__lowerCAmelCase )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["hidden_states", "timestep"]
self.assertListEqual(arg_names[:2], __lowerCAmelCase )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy" )
UpperCamelCase__ = model.to(__lowerCAmelCase )
if hasattr(__lowerCAmelCase, "set_default_attn_processor" ):
model.set_default_attn_processor()
UpperCamelCase__ = self.get_dummy_seed_input()
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )[0]
UpperCamelCase__ = output[0, :5].flatten().cpu()
print(__lowerCAmelCase )
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
UpperCamelCase__ = torch.tensor([-1.3_436, -0.2_870, 0.7_538, 0.4_368, -0.0_239] )
self.assertTrue(torch_all_close(__lowerCAmelCase, __lowerCAmelCase, rtol=1e-2 ) )
@slow
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Union[str, Any], a_ : int=1, a_ : Any=768, a_ : Dict=77, a_ : Optional[Any]=0 ):
"""simple docstring"""
torch.manual_seed(__lowerCAmelCase )
UpperCamelCase__ = batch_size
UpperCamelCase__ = embedding_dim
UpperCamelCase__ = num_embeddings
UpperCamelCase__ = torch.randn((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = torch.randn((batch_size, embedding_dim) ).to(__lowerCAmelCase )
UpperCamelCase__ = torch.randn((batch_size, num_embeddings, embedding_dim) ).to(__lowerCAmelCase )
return {
"hidden_states": hidden_states,
"timestep": 2,
"proj_embedding": proj_embedding,
"encoder_hidden_states": encoder_hidden_states,
}
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@parameterized.expand(
[
# fmt: off
[13, [-0.5_861, 0.1_283, -0.0_931, 0.0_882, 0.4_476, 0.1_329, -0.0_498, 0.0_640]],
[37, [-0.4_913, 0.0_110, -0.0_483, 0.0_541, 0.4_954, -0.0_170, 0.0_354, 0.1_651]],
# fmt: on
] )
def lowercase_ ( self : Dict, a_ : Optional[Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior" )
model.to(__lowerCAmelCase )
UpperCamelCase__ = self.get_dummy_seed_input(seed=__lowerCAmelCase )
with torch.no_grad():
UpperCamelCase__ = model(**__lowerCAmelCase )[0]
assert list(sample.shape ) == [1, 768]
UpperCamelCase__ = sample[0, :8].flatten().cpu()
print(__lowerCAmelCase )
UpperCamelCase__ = torch.tensor(__lowerCAmelCase )
assert torch_all_close(__lowerCAmelCase, __lowerCAmelCase, atol=1e-3 )
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> int:
'''simple docstring'''
UpperCamelCase__ = torch.load(__a , map_location="cpu" )
if "model" in sd.keys():
UpperCamelCase__ = torch.load(__a , map_location="cpu" )['model']
# pop unnecessary weights
UpperCamelCase__ = [
'decoder.version',
'decoder.output_projection.weight',
]
for key in keys_to_delete:
if key in sd:
sd.pop(__a )
UpperCamelCase__ = {
'decoder.project_in_dim.weight': 'decoder.project_in.weight',
'decoder.project_out_dim.weight': 'decoder.project_out.weight',
'decoder.layer_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.layer_norm.bias': 'decoder.final_layer_norm.bias',
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
UpperCamelCase__ = sd.pop(__a )
UpperCamelCase__ = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
UpperCamelCase__ = sd[key]
# We split QKV in separate Q,K,V
UpperCamelCase__ = key.replace(".qkv_proj." , ".q_proj." )
UpperCamelCase__ = key.replace(".qkv_proj." , ".k_proj." )
UpperCamelCase__ = key.replace(".qkv_proj." , ".v_proj." )
UpperCamelCase__ = value.shape[0]
assert depth % 3 == 0
# `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
UpperCamelCase__ = torch.split(__a , depth // 3 , dim=0 )
UpperCamelCase__ = q
UpperCamelCase__ = k
UpperCamelCase__ = v
del sd[key]
return sd
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any]=None ) -> str:
'''simple docstring'''
UpperCamelCase__ = load_checkpoint(__a )
if config is not None:
UpperCamelCase__ = OPTConfig.from_pretrained(__a )
else:
UpperCamelCase__ = OPTConfig()
UpperCamelCase__ = OPTModel(__a ).half().eval()
model.load_state_dict(__a )
# Check results
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
if __name__ == "__main__":
__lowercase: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--fairseq_path",
type=str,
help=(
"path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
" https://huggingface.co/models?other=opt_metasq"
),
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
__lowercase: Optional[Any] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase):
_lowerCamelCase : Any = MobileBertTokenizer
_lowerCamelCase : int = MobileBertTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : List[str] = True
_lowerCamelCase : Dict = filter_non_english
_lowerCamelCase : str = 'google/mobilebert-uncased'
def lowercase_ ( self : Any ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
UpperCamelCase__ = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def lowercase_ ( self : Union[str, Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = """UNwant\u00E9d,running"""
UpperCamelCase__ = """unwanted, running"""
return input_text, output_text
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class(self.vocab_file )
UpperCamelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_, ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [9, 6, 7, 12, 10, 11] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = """UNwant\u00E9d,running"""
UpperCamelCase__ = tokenizer.tokenize(a_ )
UpperCamelCase__ = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokenizer.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
# With lower casing
UpperCamelCase__ = self.get_tokenizer(do_lower_case=a_ )
UpperCamelCase__ = self.get_rust_tokenizer(do_lower_case=a_ )
UpperCamelCase__ = """UNwant\u00E9d,running"""
UpperCamelCase__ = tokenizer.tokenize(a_ )
UpperCamelCase__ = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokenizer.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ), ["ah", "\u535A", "\u63A8", "zz"] )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ), ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ), ["hello"] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ), ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ), ["h\u00E9llo"] )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ), ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ), ["hello"] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ), ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ), ["hello"] )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ), ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ), ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_, strip_accents=a_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ), ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = BasicTokenizer(do_lower_case=a_, never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCamelCase__ = {}
for i, token in enumerate(a_ ):
UpperCamelCase__ = i
UpperCamelCase__ = WordpieceTokenizer(vocab=a_, unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ), [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ), ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ), ["[UNK]", "runn", "##ing"] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(a_ ) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
[rust_tokenizer.tokenize(a_ ) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] )
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
UpperCamelCase__ = tokenizer.encode("sequence builders", add_special_tokens=a_ )
UpperCamelCase__ = tokenizer.encode("multi-sequence build", add_special_tokens=a_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_, a_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
UpperCamelCase__ = tokenizer_r.encode_plus(
a_, return_attention_mask=a_, return_token_type_ids=a_, return_offsets_mapping=a_, add_special_tokens=a_, )
UpperCamelCase__ = tokenizer_r.do_lower_case if hasattr(a_, "do_lower_case" ) else False
UpperCamelCase__ = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"] )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = ["""的""", """人""", """有"""]
UpperCamelCase__ = """""".join(a_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = True
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = tokenizer_p.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = tokenizer_r.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(a_ )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = False
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = tokenizer_r.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = tokenizer_p.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = tokenizer_r.convert_ids_to_tokens(a_ )
UpperCamelCase__ = tokenizer_p.convert_ids_to_tokens(a_ )
# it is expected that only the first Chinese character is not preceded by "##".
UpperCamelCase__ = [
f'##{token}' if idx != 0 else token for idx, token in enumerate(a_ )
]
self.assertListEqual(a_, a_ )
self.assertListEqual(a_, a_ )
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : Optional[Any] ) -> int:
'''simple docstring'''
stooge(lowerCAmelCase__ , 0 , len(lowerCAmelCase__ ) - 1 )
return arr
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] ) -> List[str]:
'''simple docstring'''
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
UpperCamelCase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
UpperCamelCase__ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(lowerCAmelCase__ , lowerCAmelCase__ , (h - t) )
# Recursively sort last 2/3 elements
stooge(lowerCAmelCase__ , i + t , (lowerCAmelCase__) )
# Recursively sort first 2/3 elements
stooge(lowerCAmelCase__ , lowerCAmelCase__ , (h - t) )
if __name__ == "__main__":
__lowercase: str = input("Enter numbers separated by a comma:\n").strip()
__lowercase: str = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : bool , _UpperCamelCase : list[int] , _UpperCamelCase : float ) -> str:
'''simple docstring'''
if depth < 0:
raise ValueError("Depth cannot be less than 0" )
if not scores:
raise ValueError("Scores cannot be empty" )
if depth == height:
return scores[node_index]
return (
max(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
if is_max
else min(
minimax(depth + 1 , node_index * 2 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , minimax(depth + 1 , node_index * 2 + 1 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
)
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
UpperCamelCase__ = [90, 23, 6, 33, 21, 65, 1_23, 3_44_23]
UpperCamelCase__ = math.log(len(_UpperCamelCase ) , 2 )
print(F'Optimal value : {minimax(0 , 0 , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__lowercase: List[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
__lowercase: int = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def SCREAMING_SNAKE_CASE__( ) -> Tuple:
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=["rouge2", "rougeL"] )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , bootstrap_aggregation=_UpperCamelCase , rouge_keys=["rouge2"] )
assert (
pd.DataFrame(no_aggregation["rouge2"] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["rouge2"] ).fmeasure.mean()
)
def SCREAMING_SNAKE_CASE__( ) -> Any:
UpperCamelCase__ = "rougeLsum"
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=[k] )[k]
assert score > score_no_sep
def SCREAMING_SNAKE_CASE__( ) -> Tuple:
UpperCamelCase__ = ["rouge1", "rouge2", "rougeL"]
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase , rouge_keys=_UpperCamelCase )
assert score_sep == score_no_sep
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
UpperCamelCase__ = [
"Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .",
]
UpperCamelCase__ = [
"Margot Frank, died in 1945, a month earlier than previously thought.",
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"
" the final seconds on board Flight 9525.",
]
assert calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase ) == calculate_rouge(_UpperCamelCase , _UpperCamelCase , newline_sep=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
UpperCamelCase__ = [
"\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" "
]
UpperCamelCase__ = [
" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."
]
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=["rougeLsum"] , newline_sep=_UpperCamelCase )["rougeLsum"]
UpperCamelCase__ = calculate_rouge(_UpperCamelCase , _UpperCamelCase , rouge_keys=["rougeLsum"] )["rougeLsum"]
assert new_score > prev_score
def SCREAMING_SNAKE_CASE__( ) -> Any:
UpperCamelCase__ = Path("examples/seq2seq/test_data/wmt_en_ro" )
UpperCamelCase__ = calculate_rouge_path(data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = calculate_rouge_path(
data_dir.joinpath("test.source" ) , data_dir.joinpath("test.target" ) , bootstrap_aggregation=_UpperCamelCase )
assert isinstance(_UpperCamelCase , _UpperCamelCase )
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
UpperCamelCase__ = split_dict._to_yaml_list()
assert len(_UpperCamelCase ) == len(_UpperCamelCase )
UpperCamelCase__ = SplitDict._from_yaml_list(_UpperCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
UpperCamelCase__ = None
# the split name of split_dict takes over the name of the split info object
UpperCamelCase__ = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"split_info" , [SplitInfo(), SplitInfo(dataset_name=_UpperCamelCase ), SplitInfo(dataset_name="my_dataset" )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = asdict(SplitDict({"train": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
'''unet/diffusion_pytorch_model.bin''',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
self.assertTrue(is_safetensors_compatible(__lowerCamelCase ) )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.bin''',
'''safety_checker/model.safetensors''',
'''vae/diffusion_pytorch_model.bin''',
'''vae/diffusion_pytorch_model.safetensors''',
'''text_encoder/pytorch_model.bin''',
# Removed: 'text_encoder/model.safetensors',
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
self.assertFalse(is_safetensors_compatible(__lowerCamelCase ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = [
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = [
'''unet/diffusion_pytorch_model.bin''',
'''unet/diffusion_pytorch_model.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
'''unet/diffusion_pytorch_model.fp16.bin''',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
UpperCamelCase__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = [
'''text_encoder/pytorch_model.fp16.bin''',
'''text_encoder/model.fp16.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = [
'''text_encoder/pytorch_model.bin''',
'''text_encoder/model.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertTrue(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = [
'''safety_checker/pytorch_model.fp16.bin''',
'''safety_checker/model.fp16.safetensors''',
'''vae/diffusion_pytorch_model.fp16.bin''',
'''vae/diffusion_pytorch_model.fp16.safetensors''',
'''text_encoder/pytorch_model.fp16.bin''',
# 'text_encoder/model.fp16.safetensors',
'''unet/diffusion_pytorch_model.fp16.bin''',
'''unet/diffusion_pytorch_model.fp16.safetensors''',
]
UpperCamelCase__ = '''fp16'''
self.assertFalse(is_safetensors_compatible(__lowerCamelCase, variant=__lowerCamelCase ) )
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A, "hidden_sizes" ) )
self.parent.assertTrue(hasattr(_A, "num_attention_heads" ) )
class UpperCAmelCase :
def __init__( self : Dict, a_ : str, a_ : Optional[Any]=13, a_ : List[str]=64, a_ : Optional[Any]=3, a_ : Any=3, a_ : Tuple=2, a_ : Any=1, a_ : Tuple=16, a_ : Dict=[128, 256, 384], a_ : str=[4, 6, 8], a_ : Optional[int]=[2, 3, 4], a_ : Any=[16, 16, 16], a_ : str=0, a_ : Tuple=[2, 2, 2], a_ : Optional[Any]=[2, 2, 2], a_ : int=0.02, a_ : Any=True, a_ : int=True, a_ : Any=2, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = kernel_size
UpperCamelCase__ = stride
UpperCamelCase__ = padding
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = depths
UpperCamelCase__ = key_dim
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = patch_size
UpperCamelCase__ = attention_ratio
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = initializer_range
UpperCamelCase__ = [
['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = num_labels
UpperCamelCase__ = initializer_range
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return LevitConfig(
image_size=self.image_size, num_channels=self.num_channels, kernel_size=self.kernel_size, stride=self.stride, padding=self.padding, patch_size=self.patch_size, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, depths=self.depths, key_dim=self.key_dim, drop_path_rate=self.drop_path_rate, mlp_ratio=self.mlp_ratio, attention_ratio=self.attention_ratio, initializer_range=self.initializer_range, down_ops=self.down_ops, )
def lowercase_ ( self : Union[str, Any], a_ : List[Any], a_ : Optional[int], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = LevitModel(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__ = model(_A )
UpperCamelCase__ = (self.image_size, self.image_size)
UpperCamelCase__ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
UpperCamelCase__ = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]), )
def lowercase_ ( self : List[str], a_ : Dict, a_ : Tuple, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = LevitForImageClassification(_A )
model.to(_A )
model.eval()
UpperCamelCase__ = model(_A, labels=_A )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
_lowerCamelCase : Any = (
{
"""feature-extraction""": LevitModel,
"""image-classification""": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
_lowerCamelCase : Dict = False
_lowerCamelCase : Tuple = False
_lowerCamelCase : Union[str, Any] = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[Any] = False
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = LevitModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=_A, has_text_modality=_A, hidden_size=37 )
def lowercase_ ( self : Dict ):
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
pass
@unittest.skip(reason="Levit does not output attentions" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_A )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1], _A )
def lowercase_ ( self : Any ):
"""simple docstring"""
def check_hidden_states_output(a_ : Tuple, a_ : Union[str, Any], a_ : Union[str, Any] ):
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCamelCase__ = model(**self._prepare_for_class(_A, _A ) )
UpperCamelCase__ = outputs.hidden_states
UpperCamelCase__ = len(self.model_tester.depths ) + 1
self.assertEqual(len(_A ), _A )
UpperCamelCase__ = (self.model_tester.image_size, self.model_tester.image_size)
UpperCamelCase__ = image_size[0], image_size[1]
for _ in range(4 ):
UpperCamelCase__ = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
UpperCamelCase__ = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [
height * width,
self.model_tester.hidden_sizes[0],
], )
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = True
check_hidden_states_output(_A, _A, _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(_A, _A, _A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase_ ( self : Dict ):
"""simple docstring"""
pass
def lowercase_ ( self : List[str], a_ : str, a_ : Union[str, Any], a_ : Optional[int]=False ):
"""simple docstring"""
UpperCamelCase__ = super()._prepare_for_class(_A, _A, return_labels=_A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(_A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = self._prepare_for_class(_A, _A, return_labels=_A )
UpperCamelCase__ = model(**_A ).loss
loss.backward()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCamelCase__ = False
UpperCamelCase__ = True
for model_class in self.all_model_classes:
if model_class in get_values(_A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
UpperCamelCase__ = model_class(_A )
model.gradient_checkpointing_enable()
model.to(_A )
model.train()
UpperCamelCase__ = self._prepare_for_class(_A, _A, return_labels=_A )
UpperCamelCase__ = model(**_A ).loss
loss.backward()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(_A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
UpperCamelCase__ = problem_type['title']
UpperCamelCase__ = problem_type['num_labels']
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = self._prepare_for_class(_A, _A, return_labels=_A )
if problem_type["num_labels"] > 1:
UpperCamelCase__ = inputs['labels'].unsqueeze(1 ).repeat(1, problem_type["num_labels"] )
UpperCamelCase__ = inputs['labels'].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=_A ) as warning_list:
UpperCamelCase__ = model(**_A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = LevitModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : int ):
"""simple docstring"""
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
_A )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=_A, return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
UpperCamelCase__ = model(**_A )
# verify the logits
UpperCamelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, _A )
UpperCamelCase__ = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3], _A, atol=1e-4 ) )
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
import torch
from transformers import AutoModel
class UpperCAmelCase ( torch.nn.Module):
def __init__( self : Any, a_ : str="sayef/fsner-bert-base-uncased" ):
"""simple docstring"""
super(__snake_case, self ).__init__()
UpperCamelCase__ = AutoModel.from_pretrained(__snake_case, return_dict=__snake_case )
UpperCamelCase__ = torch.nn.CosineSimilarity(3, 1e-0_8 )
UpperCamelCase__ = torch.nn.Softmax(dim=1 )
def lowercase_ ( self : List[str], **a_ : int ):
"""simple docstring"""
return self.bert(**__snake_case ).last_hidden_state
def lowercase_ ( self : Optional[int], a_ : Union[str, Any] ):
"""simple docstring"""
return token_embeddings.sum(2, keepdim=__snake_case )
def lowercase_ ( self : str, a_ : Tuple, a_ : List[Any], a_ : Optional[Any]=1 ):
"""simple docstring"""
return self.softmax(T * self.cos(__snake_case, __snake_case ) )
def lowercase_ ( self : int, a_ : int, a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = W_supports['sizes'].tolist()
UpperCamelCase__ = W_supports['start_token_id'].item()
UpperCamelCase__ = W_supports['end_token_id'].item()
del W_supports["sizes"]
del W_supports["start_token_id"]
del W_supports["end_token_id"]
UpperCamelCase__ = self.BERT(**__snake_case )
UpperCamelCase__ = self.BERT(**__snake_case )
UpperCamelCase__ = None
UpperCamelCase__ = None
UpperCamelCase__ = W_supports['input_ids'] == start_token_id
UpperCamelCase__ = W_supports['input_ids'] == end_token_id
for i, size in enumerate(__snake_case ):
if i == 0:
UpperCamelCase__ = 0
else:
UpperCamelCase__ = support_sizes[i - 1]
UpperCamelCase__ = S[s : s + size][start_token_masks[s : s + size]]
UpperCamelCase__ = S[s : s + size][end_token_masks[s : s + size]]
UpperCamelCase__ = torch.matmul(q[i], s_start.T ).sum(1 ).softmax(0 )
UpperCamelCase__ = torch.matmul(q[i], s_end.T ).sum(1 ).softmax(0 )
if p_starts is not None:
UpperCamelCase__ = torch.vstack((p_starts, p_start) )
UpperCamelCase__ = torch.vstack((p_ends, p_end) )
else:
UpperCamelCase__ = p_start
UpperCamelCase__ = p_end
return p_starts, p_ends
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
import random
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = a[left_index]
UpperCamelCase__ = left_index + 1
for j in range(left_index + 1 , lowerCAmelCase__ ):
if a[j] < pivot:
UpperCamelCase__ , UpperCamelCase__ = a[i], a[j]
i += 1
UpperCamelCase__ , UpperCamelCase__ = a[i - 1], a[left_index]
return i - 1
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : int , _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
if left < right:
UpperCamelCase__ = random.randint(lowerCAmelCase__ , right - 1 )
UpperCamelCase__ , UpperCamelCase__ = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
UpperCamelCase__ = partition(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
quick_sort_random(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # recursive quicksort to the left of the pivot point
quick_sort_random(
lowerCAmelCase__ , pivot_index + 1 , lowerCAmelCase__ ) # recursive quicksort to the right of the pivot point
def SCREAMING_SNAKE_CASE__( ) -> str:
'''simple docstring'''
UpperCamelCase__ = input("Enter numbers separated by a comma:\n" ).strip()
UpperCamelCase__ = [int(lowerCAmelCase__ ) for item in user_input.split("," )]
quick_sort_random(lowerCAmelCase__ , 0 , len(lowerCAmelCase__ ) )
print(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 0
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> str:
'''simple docstring'''
UpperCamelCase__ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCamelCase__ = MaskFormerConfig(backbone_config=_UpperCamelCase )
UpperCamelCase__ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCamelCase__ = 8_47
UpperCamelCase__ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCamelCase__ = 1_50
UpperCamelCase__ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCamelCase__ = 1_71
UpperCamelCase__ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCamelCase__ = 1_33
UpperCamelCase__ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCamelCase__ = 19
UpperCamelCase__ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCamelCase__ = 65
UpperCamelCase__ = "mapillary-vistas-id2label.json"
UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = dct.pop(_UpperCamelCase )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[:dim, :]
UpperCamelCase__ = in_proj_bias[: dim]
UpperCamelCase__ = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase__ = in_proj_bias[
dim : dim * 2
]
UpperCamelCase__ = in_proj_weight[
-dim :, :
]
UpperCamelCase__ = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> int:
'''simple docstring'''
UpperCamelCase__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE__( ) -> torch.Tensor:
'''simple docstring'''
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : List[Any] , _UpperCamelCase : int = False ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = get_maskformer_config(_UpperCamelCase )
# load original state_dict
with open(_UpperCamelCase , "rb" ) as f:
UpperCamelCase__ = pickle.load(_UpperCamelCase )
UpperCamelCase__ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCamelCase__ = create_rename_keys(_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# load 🤗 model
UpperCamelCase__ = MaskFormerForInstanceSegmentation(_UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCamelCase , param.shape )
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
UpperCamelCase__ = prepare_img()
if "vistas" in model_name:
UpperCamelCase__ = 65
elif "cityscapes" in model_name:
UpperCamelCase__ = 6_55_35
else:
UpperCamelCase__ = 2_55
UpperCamelCase__ = True if "ade" in model_name else False
UpperCamelCase__ = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase )
UpperCamelCase__ = image_processor(_UpperCamelCase , return_tensors="pt" )
UpperCamelCase__ = model(**_UpperCamelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCamelCase__ = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
__lowercase: Any = "3"
print("Python version:", sys.version)
print("OS platform:", platform.platform())
print("OS architecture:", platform.machine())
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
except ImportError:
print("Torch version:", None)
try:
import transformers
print("transformers version:", transformers.__version__)
except ImportError:
print("transformers version:", None)
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
def __init__( self : Union[str, Any], a_ : Optional[Any], a_ : str=2, a_ : Dict=True, a_ : Optional[Any]=False, a_ : Tuple=10, a_ : Any=3, a_ : Dict=32 * 4, a_ : str=32 * 6, a_ : int=4, a_ : Dict=32, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = is_training
UpperCamelCase__ = use_auxiliary_loss
UpperCamelCase__ = num_queries
UpperCamelCase__ = num_channels
UpperCamelCase__ = min_size
UpperCamelCase__ = max_size
UpperCamelCase__ = num_labels
UpperCamelCase__ = mask_feature_size
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_A )
UpperCamelCase__ = torch.ones([self.batch_size, self.min_size, self.max_size], device=_A )
UpperCamelCase__ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=_A ) > 0.5
).float()
UpperCamelCase__ = (torch.rand((self.batch_size, self.num_labels), device=_A ) > 0.5).long()
UpperCamelCase__ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase_ ( self : str, a_ : Optional[int], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = output.encoder_hidden_states
UpperCamelCase__ = output.pixel_decoder_hidden_states
UpperCamelCase__ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ), len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_A ), config.decoder_config.decoder_layers )
def lowercase_ ( self : Tuple, a_ : List[str], a_ : Dict, a_ : Any, a_ : Optional[int]=False ):
"""simple docstring"""
with torch.no_grad():
UpperCamelCase__ = MaskFormerModel(config=_A )
model.to(_A )
model.eval()
UpperCamelCase__ = model(pixel_values=_A, pixel_mask=_A )
UpperCamelCase__ = model(_A, output_hidden_states=_A )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_A, _A )
def lowercase_ ( self : Tuple, a_ : int, a_ : str, a_ : Dict, a_ : Optional[Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerForInstanceSegmentation(config=_A )
model.to(_A )
model.eval()
def comm_check_on_output(a_ : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCamelCase__ = model(pixel_values=_A, pixel_mask=_A )
UpperCamelCase__ = model(_A )
comm_check_on_output(_A )
UpperCamelCase__ = model(
pixel_values=_A, pixel_mask=_A, mask_labels=_A, class_labels=_A )
comm_check_on_output(_A )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape, torch.Size([1] ) )
@require_torch
class UpperCAmelCase ( snake_case__ , snake_case__ , unittest.TestCase):
_lowerCamelCase : List[str] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
_lowerCamelCase : str = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
_lowerCamelCase : int = False
_lowerCamelCase : str = False
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : Optional[int] = False
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=_A, has_text_modality=_A )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A, **_A, output_hidden_states=_A )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*_A )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowercase_ ( self : str ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase_ ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_A )
UpperCamelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], _A )
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
UpperCamelCase__ = MaskFormerModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = (self.model_tester.min_size,) * 2
UpperCamelCase__ = {
"pixel_values": torch.randn((2, 3, *size), device=_A ),
"mask_labels": torch.randn((2, 10, *size), device=_A ),
"class_labels": torch.zeros(2, 10, device=_A ).long(),
}
UpperCamelCase__ = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(_A )
UpperCamelCase__ = model(**_A )
self.assertTrue(outputs.loss is not None )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(_A, **_A, output_hidden_states=_A )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(_A ).to(_A )
UpperCamelCase__ = model(**_A, output_attentions=_A )
self.assertTrue(outputs.attentions is not None )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = model(_A, mask_labels=_A, class_labels=_A ).loss
loss.backward()
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.all_model_classes[1]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ = True
UpperCamelCase__ = True
UpperCamelCase__ = model_class(_A )
model.to(_A )
model.train()
UpperCamelCase__ = model(_A, mask_labels=_A, class_labels=_A )
UpperCamelCase__ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
UpperCamelCase__ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCamelCase__ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_A )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__lowercase: Dict = 1e-4
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(_A )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
UpperCamelCase__ = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], _A, atol=_A ) )
UpperCamelCase__ = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], _A, atol=_A ) )
UpperCamelCase__ = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]] ).to(_A )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
UpperCamelCase__ = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
UpperCamelCase__ = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _A, atol=_A ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[
[1.6_5_1_2e0_0, -5.2_5_7_2e0_0, -3.3_5_1_9e0_0],
[3.6_1_6_9e-0_2, -5.9_0_2_5e0_0, -2.9_3_1_3e0_0],
[1.0_7_6_6e-0_4, -7.7_6_3_0e0_0, -5.1_2_6_3e0_0],
] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(_A, return_tensors="pt" ).to(_A )
UpperCamelCase__ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_A, (1, 3, 800, 1088) )
with torch.no_grad():
UpperCamelCase__ = model(**_A )
# masks_queries_logits
UpperCamelCase__ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
UpperCamelCase__ = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
UpperCamelCase__ = torch.tensor(_A ).to(_A )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], _A, atol=_A ) )
# class_queries_logits
UpperCamelCase__ = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
UpperCamelCase__ = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]] ).to(_A )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], _A, atol=_A ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(_A )
.eval()
)
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )], segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )], return_tensors="pt", )
UpperCamelCase__ = inputs["pixel_values"].to(_A )
UpperCamelCase__ = [el.to(_A ) for el in inputs["mask_labels"]]
UpperCamelCase__ = [el.to(_A ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCamelCase__ = model(**_A )
self.assertTrue(outputs.loss is not None )
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["stage2", "stage3", "stage4"] , )
UpperCamelCase__ = DetaConfig(
backbone_config=snake_case__ , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=snake_case__ , with_box_refine=snake_case__ , two_stage=snake_case__ , )
# set labels
UpperCamelCase__ = 'huggingface/label-files'
if "o365" in model_name:
UpperCamelCase__ = 3_66
UpperCamelCase__ = 'object365-id2label.json'
else:
UpperCamelCase__ = 91
UpperCamelCase__ = 'coco-detection-id2label.json'
UpperCamelCase__ = num_labels
UpperCamelCase__ = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type="dataset" ) ) , "r" ) )
UpperCamelCase__ = {int(snake_case__ ): v for k, v in idalabel.items()}
UpperCamelCase__ = idalabel
UpperCamelCase__ = {v: k for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
UpperCamelCase__ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.patch_embed.proj.weight", "model.backbone.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.0.body.patch_embed.proj.bias", "model.backbone.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.0.body.patch_embed.norm.weight", "model.backbone.model.embeddings.norm.weight") )
rename_keys.append(("backbone.0.body.patch_embed.norm.bias", "model.backbone.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.reduction.weight', F'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.weight', F'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.0.body.layers.{i}.downsample.norm.bias', F'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight") )
rename_keys.append(("backbone.0.body.norm1.bias", "model.backbone.model.hidden_states_norms.stage2.bias") )
rename_keys.append(("backbone.0.body.norm2.weight", "model.backbone.model.hidden_states_norms.stage3.weight") )
rename_keys.append(("backbone.0.body.norm2.bias", "model.backbone.model.hidden_states_norms.stage3.bias") )
rename_keys.append(("backbone.0.body.norm3.weight", "model.backbone.model.hidden_states_norms.stage4.weight") )
rename_keys.append(("backbone.0.body.norm3.bias", "model.backbone.model.hidden_states_norms.stage4.bias") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', F'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', F'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', F'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', F'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.weight', F'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.value_proj.bias', F'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.weight', F'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.self_attn.output_proj.bias', F'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.weight', F'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm1.bias', F'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.weight', F'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear1.bias', F'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.weight', F'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.linear2.bias', F'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.weight', F'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.encoder.layers.{i}.norm2.bias', F'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', F'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', F'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', F'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', F'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', F'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', F'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', F'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.weight', F'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm1.bias', F'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.weight', F'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.self_attn.out_proj.bias', F'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.weight', F'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm2.bias', F'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.weight', F'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear1.bias', F'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.weight', F'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.linear2.bias', F'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.weight', F'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((F'transformer.decoder.layers.{i}.norm3.bias', F'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = dct.pop(snake_case__ )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : str ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[:dim, :]
UpperCamelCase__ = in_proj_bias[: dim]
UpperCamelCase__ = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase__ = in_proj_bias[
dim : dim * 2
]
UpperCamelCase__ = in_proj_weight[
-dim :, :
]
UpperCamelCase__ = in_proj_bias[-dim :]
# fmt: on
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = config.d_model
for i in range(config.decoder_layers ):
# read in weights + bias of input projection layer of self-attention
UpperCamelCase__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'transformer.decoder.layers.{i}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[:hidden_size, :]
UpperCamelCase__ = in_proj_bias[:hidden_size]
UpperCamelCase__ = in_proj_weight[
hidden_size : hidden_size * 2, :
]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size:, :]
UpperCamelCase__ = in_proj_bias[-hidden_size:]
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
UpperCamelCase__ = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
UpperCamelCase__ = get_deta_config(snake_case__ )
# load original state dict
if model_name == "deta-swin-large":
UpperCamelCase__ = hf_hub_download(repo_id="nielsr/deta-checkpoints" , filename="adet_swin_ft.pth" )
elif model_name == "deta-swin-large-o365":
UpperCamelCase__ = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365" , filename="deta_swin_pt_o365.pth" )
else:
raise ValueError(F'Model name {model_name} not supported' )
UpperCamelCase__ = torch.load(snake_case__ , map_location="cpu" )['model']
# original state dict
for name, param in state_dict.items():
print(snake_case__ , param.shape )
# rename keys
UpperCamelCase__ = create_rename_keys(snake_case__ )
for src, dest in rename_keys:
rename_key(snake_case__ , snake_case__ , snake_case__ )
read_in_swin_q_k_v(snake_case__ , config.backbone_config )
read_in_decoder_q_k_v(snake_case__ , snake_case__ )
# fix some prefixes
for key in state_dict.copy().keys():
if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
UpperCamelCase__ = state_dict.pop(snake_case__ )
UpperCamelCase__ = val
if "input_proj" in key:
UpperCamelCase__ = state_dict.pop(snake_case__ )
UpperCamelCase__ = val
if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
UpperCamelCase__ = state_dict.pop(snake_case__ )
UpperCamelCase__ = val
# finally, create HuggingFace model and load state dict
UpperCamelCase__ = DetaForObjectDetection(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
UpperCamelCase__ = 'cuda' if torch.cuda.is_available() else 'cpu'
model.to(snake_case__ )
# load image processor
UpperCamelCase__ = DetaImageProcessor(format="coco_detection" )
# verify our conversion on image
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = processor(images=snake_case__ , return_tensors="pt" )
UpperCamelCase__ = encoding['pixel_values']
UpperCamelCase__ = model(pixel_values.to(snake_case__ ) )
# verify logits
print("Logits:" , outputs.logits[0, :3, :3] )
print("Boxes:" , outputs.pred_boxes[0, :3, :3] )
if model_name == "deta-swin-large":
UpperCamelCase__ = torch.tensor(
[[-7.6_3_0_8, -2.8_4_8_5, -5.3_7_3_7], [-7.2_0_3_7, -4.5_5_0_5, -4.8_0_2_7], [-7.2_9_4_3, -4.2_6_1_1, -4.6_6_1_7]] )
UpperCamelCase__ = torch.tensor([[0.4_9_8_7, 0.4_9_6_9, 0.9_9_9_9], [0.2_5_4_9, 0.5_4_9_8, 0.4_8_0_5], [0.5_4_9_8, 0.2_7_5_7, 0.0_5_6_9]] )
elif model_name == "deta-swin-large-o365":
UpperCamelCase__ = torch.tensor(
[[-8.0_1_2_2, -3.5_7_2_0, -4.9_7_1_7], [-8.1_5_4_7, -3.6_8_8_6, -4.6_3_8_9], [-7.6_6_1_0, -3.6_1_9_4, -5.0_1_3_4]] )
UpperCamelCase__ = torch.tensor([[0.2_5_2_3, 0.5_5_4_9, 0.4_8_8_1], [0.7_7_1_5, 0.4_1_4_9, 0.4_6_0_1], [0.5_5_0_3, 0.2_7_5_3, 0.0_5_7_5]] )
assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(snake_case__ ) , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(snake_case__ ) , atol=1e-4 )
print("Everything ok!" )
if pytorch_dump_folder_path:
# Save model and processor
logger.info(F'Saving PyTorch model and processor to {pytorch_dump_folder_path}...' )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
model.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
# Push to hub
if push_to_hub:
print("Pushing model and processor to hub..." )
model.push_to_hub(F'jozhang97/{model_name}' )
processor.push_to_hub(F'jozhang97/{model_name}' )
if __name__ == "__main__":
__lowercase: Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
type=str,
default="deta-swin-large",
choices=["deta-swin-large", "deta-swin-large-o365"],
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase: Tuple = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : str, a_ : Optional[int], a_ : Optional[int]=7, a_ : int=3, a_ : Union[str, Any]=18, a_ : Tuple=30, a_ : List[str]=400, a_ : str=True, a_ : List[Any]=None, a_ : List[str]=True, ):
"""simple docstring"""
UpperCamelCase__ = size if size is not None else {"height": 18, "width": 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = apply_ocr
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE , unittest.TestCase):
_lowerCamelCase : int = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = LayoutLMvaImageProcessingTester(self )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, "do_resize" ) )
self.assertTrue(hasattr(a_, "size" ) )
self.assertTrue(hasattr(a_, "apply_ocr" ) )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"height": 18, "width": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42 )
self.assertEqual(image_processor.size, {"height": 42, "width": 42} )
def lowercase_ ( self : str ):
"""simple docstring"""
pass
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_ )
for image in image_inputs:
self.assertIsInstance(a_, Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
self.assertIsInstance(encoding.words, a_ )
self.assertIsInstance(encoding.boxes, a_ )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for image in image_inputs:
self.assertIsInstance(a_, torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
), )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/fixtures_docvqa", split="test" )
UpperCamelCase__ = Image.open(ds[0]["file"] ).convert("RGB" )
UpperCamelCase__ = image_processing(a_, return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ), len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
UpperCamelCase__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words, a_ )
self.assertListEqual(encoding.boxes, a_ )
# with apply_OCR = False
UpperCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=a_ )
UpperCamelCase__ = image_processing(a_, return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224) )
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 0
|
'''simple docstring'''
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase: Any = get_tests_dir("fixtures/test_sentencepiece.model")
__lowercase: List[str] = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
__lowercase: Dict = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( UpperCAmelCase_ , unittest.TestCase):
_lowerCamelCase : List[str] = CamembertTokenizer
_lowerCamelCase : int = CamembertTokenizerFast
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : str = True
def lowercase_ ( self : str ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = CamembertTokenizer(__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = '''<pad>'''
UpperCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ), __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ), __lowercase )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>NOTUSED" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(__lowercase ), 1004 )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size, 1005 )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = CamembertTokenizer(__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
UpperCamelCase__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCamelCase__ = '''I was born in 92000, and this is falsé.'''
UpperCamelCase__ = tokenizer.encode(__lowercase )
UpperCamelCase__ = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
UpperCamelCase__ = tokenizer.encode(__lowercase, add_special_tokens=__lowercase )
UpperCamelCase__ = rust_tokenizer.encode(__lowercase, add_special_tokens=__lowercase )
self.assertListEqual(__lowercase, __lowercase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(__lowercase )
UpperCamelCase__ = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
def lowercase_ ( self : Any ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = '''I was born in 92000, and this is falsé.'''
UpperCamelCase__ = tokenizer.tokenize(__lowercase )
UpperCamelCase__ = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
UpperCamelCase__ = tokenizer.encode(__lowercase, add_special_tokens=__lowercase )
UpperCamelCase__ = rust_tokenizer.encode(__lowercase, add_special_tokens=__lowercase )
self.assertListEqual(__lowercase, __lowercase )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(__lowercase )
UpperCamelCase__ = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase, __lowercase )
@slow
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {'''input_ids''': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCamelCase__ = [
'''Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '''
'''utilisé principalement dans le domaine du traitement automatique des langues (TAL).''',
'''À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '''
'''pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '''
'''telles que la traduction et la synthèse de texte.''',
]
self.tokenizer_integration_test_util(
expected_encoding=__lowercase, model_name="camembert-base", revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf", sequences=__lowercase, )
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 0
|
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Any=None ) -> Dict:
'''simple docstring'''
assert torch_layer.weight.shape == weight.shape, F'{torch_layer} layer.weight does not match'
UpperCamelCase__ = nn.Parameter(snake_case_ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F'{torch_layer} layer.bias does not match'
UpperCamelCase__ = nn.Parameter(snake_case_ )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = np.asarray(weights[0] )
UpperCamelCase__ = np.asarray(weights[1] )
UpperCamelCase__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = np.asarray(weights[0] )
UpperCamelCase__ = np.asarray(weights[1] )
UpperCamelCase__ = np.asarray(weights[2] )
UpperCamelCase__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(snake_case_ ).transpose(1 , 2 ).contiguous().view(-1 , snake_case_ ) , )
set_param(
torch_layer.output.dense , torch.tensor(snake_case_ ).view(-1 , snake_case_ ).contiguous().transpose(0 , 1 ) , )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
UpperCamelCase__ = weights[0][0][0]
UpperCamelCase__ = np.asarray(layer_norm_a[0] )
UpperCamelCase__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# lsh weights + output
UpperCamelCase__ = weights[0][1]
if len(snake_case_ ) < 4:
set_layer_weights_in_torch_lsh(snake_case_ , torch_block.attention , snake_case_ )
else:
set_layer_weights_in_torch_local(snake_case_ , torch_block.attention , snake_case_ )
# intermediate weighs
UpperCamelCase__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(snake_case_ ) == 4:
UpperCamelCase__ = intermediate_weights[2]
# layernorm 2
UpperCamelCase__ = np.asarray(intermediate_weights[0][0] )
UpperCamelCase__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# intermediate dense
UpperCamelCase__ = np.asarray(intermediate_weights[1][0] )
UpperCamelCase__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
# intermediate out
UpperCamelCase__ = np.asarray(intermediate_weights[4][0] )
UpperCamelCase__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = torch_model.reformer
# word embeds
UpperCamelCase__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(snake_case_ ) , )
if isinstance(weights[3] , snake_case_ ):
UpperCamelCase__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
UpperCamelCase__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F'{position_embeddings[emb_idx]} emb does not match'
UpperCamelCase__ = nn.Parameter(torch.tensor(snake_case_ ) )
UpperCamelCase__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
snake_case_ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
UpperCamelCase__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(snake_case_ , snake_case_ , snake_case_ )
# output layer norm
UpperCamelCase__ = np.asarray(weights[7][0] )
UpperCamelCase__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(snake_case_ ) , torch.tensor(snake_case_ ) , )
# output embeddings
UpperCamelCase__ = np.asarray(weights[9][0] )
UpperCamelCase__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(snake_case_ ).transpose(0 , 1 ).contiguous() , torch.tensor(snake_case_ ) , )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = ReformerConfig.from_json_file(snake_case_ )
print(F'Building PyTorch model from configuration: {config}' )
UpperCamelCase__ = ReformerModelWithLMHead(snake_case_ )
with open(snake_case_ , "rb" ) as f:
UpperCamelCase__ = pickle.load(snake_case_ )["weights"]
set_model_weights_in_torch(snake_case_ , snake_case_ , config.hidden_size )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , snake_case_ )
if __name__ == "__main__":
__lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowercase: Optional[int] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
__lowercase: Union[str, Any] = Lock()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : Any , _UpperCamelCase : int ) -> Any:
'''simple docstring'''
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
r_send[1].send(lowerCAmelCase_ )
process_lock.release()
# receive your right neighbor's value
process_lock.acquire()
UpperCamelCase__ = rr_cv[0].recv()
process_lock.release()
# take the lower value since you are on the left
UpperCamelCase__ = min(lowerCAmelCase_ , lowerCAmelCase_ )
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
l_send[1].send(lowerCAmelCase_ )
process_lock.release()
# receive your left neighbor's value
process_lock.acquire()
UpperCamelCase__ = lr_cv[0].recv()
process_lock.release()
# take the higher value since you are on the right
UpperCamelCase__ = max(lowerCAmelCase_ , lowerCAmelCase_ )
# after all swaps are performed, send the values back to main
result_pipe[1].send(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
UpperCamelCase__ = Pipe()
UpperCamelCase__ = Pipe()
process_array_.append(
Process(
target=lowerCAmelCase_ , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) )
UpperCamelCase__ = temp_rs
UpperCamelCase__ = temp_rr
for i in range(1 , len(lowerCAmelCase_ ) - 1 ):
UpperCamelCase__ = Pipe()
UpperCamelCase__ = Pipe()
process_array_.append(
Process(
target=lowerCAmelCase_ , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) )
UpperCamelCase__ = temp_rs
UpperCamelCase__ = temp_rr
process_array_.append(
Process(
target=lowerCAmelCase_ , args=(
len(lowerCAmelCase_ ) - 1,
arr[len(lowerCAmelCase_ ) - 1],
temp_ls,
None,
temp_lr,
None,
result_pipe[len(lowerCAmelCase_ ) - 1],
) , ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
for p in range(0 , len(lowerCAmelCase_ ) ):
UpperCamelCase__ = result_pipe[p][0].recv()
process_array_[p].join()
return arr
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = list(range(10 , 0 , -1 ) )
print("Initial List" )
print(*lowerCAmelCase_ )
UpperCamelCase__ = odd_even_transposition(lowerCAmelCase_ )
print("Sorted List\n" )
print(*lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase :
def __init__( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = []
def lowercase_ ( self : Any, a_ : Optional[int] ):
"""simple docstring"""
return self.node_position[vertex]
def lowercase_ ( self : Union[str, Any], a_ : List[str], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = pos
def lowercase_ ( self : List[str], a_ : str, a_ : List[str], a_ : str, a_ : Tuple ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
UpperCamelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
UpperCamelCase__ = 2 * start + 1
else:
UpperCamelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
UpperCamelCase__ = heap[smallest_child], positions[smallest_child]
UpperCamelCase__ = (
heap[start],
positions[start],
)
UpperCamelCase__ = temp, tempa
UpperCamelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], UpperCamelCase__ )
self.top_to_bottom(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
def lowercase_ ( self : Tuple, a_ : Tuple, a_ : Union[str, Any], a_ : Optional[Any], a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = position[index]
while index != 0:
UpperCamelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
UpperCamelCase__ = heap[parent]
UpperCamelCase__ = position[parent]
self.set_position(position[parent], UpperCamelCase__ )
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(UpperCamelCase__, UpperCamelCase__ )
break
UpperCamelCase__ = parent
else:
UpperCamelCase__ = val
UpperCamelCase__ = temp
self.set_position(UpperCamelCase__, 0 )
def lowercase_ ( self : Optional[int], a_ : Any, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = len(UpperCamelCase__ ) // 2 - 1
for i in range(UpperCamelCase__, -1, -1 ):
self.top_to_bottom(UpperCamelCase__, UpperCamelCase__, len(UpperCamelCase__ ), UpperCamelCase__ )
def lowercase_ ( self : str, a_ : Tuple, a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = positions[0]
UpperCamelCase__ = sys.maxsize
self.top_to_bottom(UpperCamelCase__, 0, len(UpperCamelCase__ ), UpperCamelCase__ )
return temp
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
UpperCamelCase__ = Heap()
UpperCamelCase__ = [0] * len(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = [-1] * len(__SCREAMING_SNAKE_CASE ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
UpperCamelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
UpperCamelCase__ = []
for vertex in range(len(__SCREAMING_SNAKE_CASE ) ):
distance_tv.append(sys.maxsize )
positions.append(__SCREAMING_SNAKE_CASE )
heap.node_position.append(__SCREAMING_SNAKE_CASE )
UpperCamelCase__ = []
UpperCamelCase__ = 1
UpperCamelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
UpperCamelCase__ = 0
UpperCamelCase__ = distance
heap.heapify(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for _ in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
UpperCamelCase__ = heap.delete_minimum(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
UpperCamelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__SCREAMING_SNAKE_CASE )]
):
UpperCamelCase__ = distance
heap.bottom_to_top(
__SCREAMING_SNAKE_CASE , heap.get_position(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCamelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
__lowercase: Tuple = int(input("Enter number of edges: ").strip())
__lowercase: Optional[int] = defaultdict(list)
for _ in range(edges_number):
__lowercase: int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
from timeit import timeit
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Union[str, Any]:
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCamelCase__ = 0
while number:
number &= number - 1
result += 1
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
if number < 0:
raise ValueError("the value of input must not be negative" )
UpperCamelCase__ = 0
while number:
if number % 2 == 1:
result += 1
number >>= 1
return result
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
def do_benchmark(_UpperCamelCase : int ) -> None:
UpperCamelCase__ = "import __main__ as z"
print(F'Benchmark when {number = }:' )
print(F'{get_set_bits_count_using_modulo_operator(__lowerCAmelCase ) = }' )
UpperCamelCase__ = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=__lowerCAmelCase )
print(F'timeit() runs in {timing} seconds' )
print(F'{get_set_bits_count_using_brian_kernighans_algorithm(__lowerCAmelCase ) = }' )
UpperCamelCase__ = timeit(
"z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=__lowerCAmelCase , )
print(F'timeit() runs in {timing} seconds' )
for number in (25, 37, 58, 0):
do_benchmark(__lowerCAmelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: List[Any] = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE):
_lowerCamelCase : int = "gpt_neox_japanese"
def __init__( self : Union[str, Any], a_ : List[str]=3_2000, a_ : Any=2560, a_ : int=32, a_ : List[str]=32, a_ : Tuple=4, a_ : List[Any]="gelu", a_ : Optional[Any]=1.00, a_ : Optional[int]=1_0000, a_ : Optional[int]=2048, a_ : Dict=0.02, a_ : Optional[int]=1e-5, a_ : str=True, a_ : Optional[Any]=3_1996, a_ : Optional[Any]=3_1999, a_ : List[Any]=0.1, a_ : Union[str, Any]=0.0, **a_ : str, ):
"""simple docstring"""
super().__init__(bos_token_id=A_, eos_token_id=A_, **A_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
for part_id in partition_order:
UpperCamelCase__ = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(_UpperCamelCase ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(1_00 ).repartition(1 )
UpperCamelCase__ = Spark(_UpperCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(10 ).repartition(2 )
UpperCamelCase__ = [1, 0]
UpperCamelCase__ = _generate_iterable_examples(_UpperCamelCase , _UpperCamelCase ) # Reverse the partitions.
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , _UpperCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> str:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(10 ).repartition(1 )
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
UpperCamelCase__ = lambda _UpperCamelCase : x.reverse()
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [2, 1, 0] )
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shuffle_data_sources(_UpperCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(1_00 ).repartition(1 )
UpperCamelCase__ = Spark(_UpperCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowercase: str = {
"configuration_falcon": ["FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP", "FalconConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: int = [
"FALCON_PRETRAINED_MODEL_ARCHIVE_LIST",
"FalconForCausalLM",
"FalconModel",
"FalconPreTrainedModel",
"FalconForSequenceClassification",
"FalconForTokenClassification",
"FalconForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__lowercase: str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCamelCase )] )
UpperCamelCase__ = np.array(_UpperCamelCase )
UpperCamelCase__ = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , _UpperCamelCase ) ) , x.transpose() ) , _UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = (1, 2, 1)
UpperCamelCase__ = (1, 1, 0, 7)
UpperCamelCase__ = SARIMAX(
_UpperCamelCase , exog=_UpperCamelCase , order=_UpperCamelCase , seasonal_order=_UpperCamelCase )
UpperCamelCase__ = model.fit(disp=_UpperCamelCase , maxiter=6_00 , method="nm" )
UpperCamelCase__ = model_fit.predict(1 , len(_UpperCamelCase ) , exog=[test_match] )
return result[0]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = regressor.predict(_UpperCamelCase )
return y_pred[0]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
train_user.sort()
UpperCamelCase__ = np.percentile(_UpperCamelCase , 25 )
UpperCamelCase__ = np.percentile(_UpperCamelCase , 75 )
UpperCamelCase__ = qa - qa
UpperCamelCase__ = qa - (iqr * 0.1)
return low_lim
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : float ) -> bool:
'''simple docstring'''
UpperCamelCase__ = 0
UpperCamelCase__ = 0
for i in list_vote:
if i > actual_result:
UpperCamelCase__ = not_safe + 1
else:
if abs(abs(_UpperCamelCase ) - abs(_UpperCamelCase ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__lowercase: Dict = [[18_231, 0.0, 1], [22_621, 1.0, 2], [15_675, 0.0, 3], [23_583, 1.0, 4]]
__lowercase: int = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
__lowercase: Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
__lowercase: int = normalize_df[:, 2].tolist()
__lowercase: List[Any] = normalize_df[:, 0].tolist()
__lowercase: Optional[int] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__lowercase: List[Any] = normalize_df[:, [1, 2]].tolist()
__lowercase: str = x[: len(x) - 1]
__lowercase: Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
__lowercase: Optional[Any] = total_date[: len(total_date) - 1]
__lowercase: List[str] = total_user[: len(total_user) - 1]
__lowercase: Dict = total_match[: len(total_match) - 1]
__lowercase: Union[str, Any] = total_date[len(total_date) - 1 :]
__lowercase: List[Any] = total_user[len(total_user) - 1 :]
__lowercase: Union[str, Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__lowercase: Dict = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__lowercase: Union[str, Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : dict ) -> tuple:
'''simple docstring'''
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray ) -> XGBClassifier:
'''simple docstring'''
UpperCamelCase__ = XGBClassifier()
classifier.fit(_UpperCamelCase , _UpperCamelCase )
return classifier
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = load_iris()
UpperCamelCase__ , UpperCamelCase__ = data_handling(_UpperCamelCase )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_test_split(
_UpperCamelCase , _UpperCamelCase , test_size=0.2_5 )
UpperCamelCase__ = iris["target_names"]
# Create an XGBoost Classifier from the training data
UpperCamelCase__ = xgboost(_UpperCamelCase , _UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , display_labels=_UpperCamelCase , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> float:
'''simple docstring'''
def get_matched_characters(_UpperCamelCase : str , _UpperCamelCase : str ) -> str:
UpperCamelCase__ = []
UpperCamelCase__ = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
UpperCamelCase__ = int(max(0 , i - limit ) )
UpperCamelCase__ = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_UpperCamelCase )
UpperCamelCase__ = F'{_stra[0:_stra.index(_UpperCamelCase )]} {_stra[_stra.index(_UpperCamelCase ) + 1:]}'
return "".join(_UpperCamelCase )
# matching characters
UpperCamelCase__ = get_matched_characters(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = get_matched_characters(_UpperCamelCase , _UpperCamelCase )
UpperCamelCase__ = len(_UpperCamelCase )
# transposition
UpperCamelCase__ = (
len([(ca, ca) for ca, ca in zip(_UpperCamelCase , _UpperCamelCase ) if ca != ca] ) // 2
)
if not match_count:
UpperCamelCase__ = 0.0
else:
UpperCamelCase__ = (
1
/ 3
* (
match_count / len(_UpperCamelCase )
+ match_count / len(_UpperCamelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
UpperCamelCase__ = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("hello", "world"))
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ''
_lowerCamelCase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase : str = None # compression type in fsspec. ex: "gzip"
_lowerCamelCase : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str, a_ : str = "", a_ : Optional[str] = None, a_ : Optional[dict] = None, **a_ : Union[str, Any] ):
"""simple docstring"""
super().__init__(self, **a_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase__ = fsspec.open(
a_, mode="rb", protocol=a_, compression=self.compression, client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
UpperCamelCase__ = os.path.basename(self.file.path.split("::" )[0] )
UpperCamelCase__ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
UpperCamelCase__ = None
@classmethod
def lowercase_ ( cls : Optional[Any], a_ : List[Any] ):
"""simple docstring"""
return super()._strip_protocol(a_ ).lstrip("/" )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
if self.dir_cache is None:
UpperCamelCase__ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
UpperCamelCase__ = {f["name"]: f}
def lowercase_ ( self : int, a_ : str ):
"""simple docstring"""
return self.file.open().read()
def lowercase_ ( self : List[Any], a_ : str, a_ : str = "rb", a_ : Union[str, Any]=None, a_ : Any=True, a_ : Dict=None, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = self._strip_protocol(a_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[int] = 'bz2'
_lowerCamelCase : List[Any] = 'bz2'
_lowerCamelCase : Optional[int] = '.bz2'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = 'gzip'
_lowerCamelCase : Any = 'gzip'
_lowerCamelCase : List[Any] = '.gz'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'lz4'
_lowerCamelCase : int = 'lz4'
_lowerCamelCase : Optional[int] = '.lz4'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = 'xz'
_lowerCamelCase : Optional[Any] = 'xz'
_lowerCamelCase : Optional[Any] = '.xz'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'zstd'
_lowerCamelCase : int = 'zstd'
_lowerCamelCase : Union[str, Any] = '.zst'
def __init__( self : str, a_ : str, a_ : str = "rb", a_ : Optional[str] = None, a_ : Optional[dict] = None, a_ : int = DEFAULT_BLOCK_SIZE, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(
fo=a_, mode=a_, target_protocol=a_, target_options=a_, block_size=a_, **a_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ = self.file.__enter__
class UpperCAmelCase :
def __init__( self : List[Any], a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = file_
def __enter__( self : Tuple ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Tuple, *a_ : str, **a_ : Any ):
"""simple docstring"""
self._file.__exit__(*a_, **a_ )
def __iter__( self : Optional[int] ):
"""simple docstring"""
return iter(self._file )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : int, a_ : Tuple ):
"""simple docstring"""
return getattr(self._file, a_ )
def fixed_enter(*a_ : int, **a_ : Optional[Any] ):
return WrappedFile(_enter(*a_, **a_ ) )
UpperCamelCase__ = fixed_enter
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
__lowercase: str = "#"
class UpperCAmelCase :
def __init__( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = {}
def lowercase_ ( self : Tuple, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in text:
if char not in trie:
UpperCamelCase__ = {}
UpperCamelCase__ = trie[char]
UpperCamelCase__ = True
def lowercase_ ( self : Optional[Any], a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in prefix:
if char in trie:
UpperCamelCase__ = trie[char]
else:
return []
return self._elements(a_ )
def lowercase_ ( self : int, a_ : dict ):
"""simple docstring"""
UpperCamelCase__ = []
for c, v in d.items():
UpperCamelCase__ = [" "] if c == END else [(c + s) for s in self._elements(a_ )]
result.extend(a_ )
return tuple(a_ )
__lowercase: Tuple = Trie()
__lowercase: Any = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> tuple:
'''simple docstring'''
UpperCamelCase__ = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowercase = logging.get_logger(__name__)
__lowercase = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : str, a_ : Optional[int]=None, a_ : str=None, *a_ : str, **a_ : str ):
"""simple docstring"""
super().__init__(*a_, **a_ )
if config is None:
assert isinstance(self.model, a_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
UpperCamelCase__ = self.model.config
else:
UpperCamelCase__ = config
UpperCamelCase__ = data_args
UpperCamelCase__ = self.config.tgt_vocab_size if isinstance(self.config, a_ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
" padding.." )
if self.args.label_smoothing == 0:
UpperCamelCase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCamelCase__ = label_smoothed_nll_loss
def lowercase_ ( self : Dict, a_ : int ):
"""simple docstring"""
if self.optimizer is None:
UpperCamelCase__ = ["bias", "LayerNorm.weight"]
UpperCamelCase__ = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
UpperCamelCase__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCamelCase__ = Adafactor
UpperCamelCase__ = {"scale_parameter": False, "relative_step": False}
else:
UpperCamelCase__ = AdamW
UpperCamelCase__ = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
UpperCamelCase__ = self.args.learning_rate
if self.sharded_ddp:
UpperCamelCase__ = OSS(
params=a_, optim=a_, **a_, )
else:
UpperCamelCase__ = optimizer_cls(a_, **a_ )
if self.lr_scheduler is None:
UpperCamelCase__ = self._get_lr_scheduler(a_ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowercase_ ( self : Dict, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCamelCase__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCamelCase__ = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
else:
UpperCamelCase__ = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=a_ )
return scheduler
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase_ ( self : List[Any], a_ : Any, a_ : Union[str, Any], a_ : List[str] ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCamelCase__ = model(**a_, use_cache=a_ )[0]
UpperCamelCase__ = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
else:
# compute usual loss via models
UpperCamelCase__ , UpperCamelCase__ = model(**a_, labels=a_, use_cache=a_ )[:2]
else:
# compute label smoothed loss
UpperCamelCase__ = model(**a_, use_cache=a_ )[0]
UpperCamelCase__ = torch.nn.functional.log_softmax(a_, dim=-1 )
UpperCamelCase__ , UpperCamelCase__ = self.loss_fn(a_, a_, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase_ ( self : Any, a_ : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = inputs.pop("labels" )
UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(a_, a_, a_ )
return loss
def lowercase_ ( self : Optional[int], a_ : nn.Module, a_ : Dict[str, Union[torch.Tensor, Any]], a_ : bool, a_ : Optional[List[str]] = None, ):
"""simple docstring"""
UpperCamelCase__ = self._prepare_inputs(a_ )
UpperCamelCase__ = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCamelCase__ = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **a_, )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ = self._pad_tensors_to_max_len(a_, gen_kwargs["max_length"] )
UpperCamelCase__ = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(a_, a_, a_ )
UpperCamelCase__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCamelCase__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ = self._pad_tensors_to_max_len(a_, gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f' padded to `max_length`={max_length}' )
UpperCamelCase__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
UpperCamelCase__ = tensor
return padded_tensor
| 359
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 0
|
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = LayoutLMTokenizer
_lowerCamelCase : int = LayoutLMTokenizerFast
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : Union[str, Any] = True
def lowercase_ ( self : Dict ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def lowercase_ ( self : Optional[Any], **a_ : Any ):
"""simple docstring"""
return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Optional[int], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "UNwant\u00E9d,running"
UpperCamelCase__ = "unwanted, running"
return input_text, output_text
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer_class(self.vocab_file )
UpperCamelCase__ = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_, ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), [7, 4, 5, 10, 8, 9] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
| 360
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 0
|
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowercase: int = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, *a_ : Tuple, **a_ : List[str] ):
"""simple docstring"""
super().__init__(*a_, **a_ )
requires_backends(self, "decord" )
self.check_model_type(a_ )
def lowercase_ ( self : Optional[Any], a_ : str=None, a_ : Optional[Any]=None, a_ : Dict=None ):
"""simple docstring"""
UpperCamelCase__ = {}
if frame_sampling_rate is not None:
UpperCamelCase__ = frame_sampling_rate
if num_frames is not None:
UpperCamelCase__ = num_frames
UpperCamelCase__ = {}
if top_k is not None:
UpperCamelCase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any], a_ : Union[str, List[str]], **a_ : str ):
"""simple docstring"""
return super().__call__(a_, **a_ )
def lowercase_ ( self : List[Any], a_ : List[str], a_ : Union[str, Any]=None, a_ : Optional[Any]=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase__ = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
UpperCamelCase__ = BytesIO(requests.get(a_ ).content )
UpperCamelCase__ = VideoReader(a_ )
videoreader.seek(0 )
UpperCamelCase__ = 0
UpperCamelCase__ = num_frames * frame_sampling_rate - 1
UpperCamelCase__ = np.linspace(a_, a_, num=a_, dtype=np.intaa )
UpperCamelCase__ = videoreader.get_batch(a_ ).asnumpy()
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = self.image_processor(a_, return_tensors=self.framework )
return model_inputs
def lowercase_ ( self : str, a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model(**a_ )
return model_outputs
def lowercase_ ( self : Tuple, a_ : Any, a_ : Dict=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase__ = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase__ , UpperCamelCase__ = probs.topk(a_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
UpperCamelCase__ = scores.tolist()
UpperCamelCase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_ )]
| 361
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = 'camembert'
def __init__( self : Optional[Any], a_ : Any=3_0522, a_ : Union[str, Any]=768, a_ : str=12, a_ : int=12, a_ : str=3072, a_ : Tuple="gelu", a_ : str=0.1, a_ : List[Any]=0.1, a_ : Union[str, Any]=512, a_ : Optional[Any]=2, a_ : str=0.02, a_ : Union[str, Any]=1e-1_2, a_ : Any=1, a_ : Union[str, Any]=0, a_ : str=2, a_ : Tuple="absolute", a_ : Union[str, Any]=True, a_ : Union[str, Any]=None, **a_ : Optional[int], ):
"""simple docstring"""
super().__init__(pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = hidden_act
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = position_embedding_type
UpperCamelCase__ = use_cache
UpperCamelCase__ = classifier_dropout
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 362
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : List[str] = ViTImageProcessor if is_vision_available() else None
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = (3, 32, 128)
UpperCamelCase__ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
UpperCamelCase__ = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 32, "width": 128},
}
UpperCamelCase__ = os.path.join(self.tmpdirname, a_ )
with open(self.image_processor_file, "w", encoding="utf-8" ) as fp:
json.dump(a_, a_ )
def lowercase_ ( self : int, **a_ : int ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Optional[int], **a_ : str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )
UpperCamelCase__ = Image.fromarray(np.moveaxis(a_, 0, -1 ) )
return image_input
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=a_ )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, a_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor, a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" )
UpperCamelCase__ = self.get_image_processor(do_normalize=a_, padding_value=1.0 )
UpperCamelCase__ = MgpstrProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=a_, padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer, a_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, a_ )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(a_, return_tensors="np" )
UpperCamelCase__ = processor(images=a_, return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "test"
UpperCamelCase__ = processor(text=a_ )
UpperCamelCase__ = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "test"
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.char_decode(a_ )
UpperCamelCase__ = tokenizer.batch_decode(a_ )
UpperCamelCase__ = [seq.replace(" ", "" ) for seq in decoded_tok]
self.assertListEqual(a_, a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = None
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = MgpstrProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = torch.randn(1, 27, 38 )
UpperCamelCase__ = torch.randn(1, 27, 5_0257 )
UpperCamelCase__ = torch.randn(1, 27, 3_0522 )
UpperCamelCase__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ), ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 363
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase: List[str] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[Any] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowercase: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__lowercase: Any = logging.get_logger(__name__)
__lowercase: Union[str, Any] = {
"t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 't5'
_lowerCamelCase : Optional[int] = ['past_key_values']
_lowerCamelCase : List[Any] = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : str, a_ : str=3_2128, a_ : List[Any]=512, a_ : str=64, a_ : Union[str, Any]=2048, a_ : Optional[int]=6, a_ : Optional[Any]=None, a_ : Tuple=8, a_ : Tuple=32, a_ : Dict=128, a_ : Tuple=0.1, a_ : Any=1e-6, a_ : List[str]=1.0, a_ : str="relu", a_ : str=True, a_ : Union[str, Any]=True, a_ : List[str]=0, a_ : Union[str, Any]=1, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = d_model
UpperCamelCase__ = d_kv
UpperCamelCase__ = d_ff
UpperCamelCase__ = num_layers
UpperCamelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
UpperCamelCase__ = num_heads
UpperCamelCase__ = relative_attention_num_buckets
UpperCamelCase__ = relative_attention_max_distance
UpperCamelCase__ = dropout_rate
UpperCamelCase__ = layer_norm_epsilon
UpperCamelCase__ = initializer_factor
UpperCamelCase__ = feed_forward_proj
UpperCamelCase__ = use_cache
UpperCamelCase__ = self.feed_forward_proj.split("-" )
UpperCamelCase__ = act_info[-1]
UpperCamelCase__ = act_info[0] == "gated"
if len(a_ ) > 1 and act_info[0] != "gated" or len(a_ ) > 2:
raise ValueError(
f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
UpperCamelCase__ = "gelu_new"
super().__init__(
pad_token_id=a_, eos_token_id=a_, is_encoder_decoder=a_, **a_, )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
UpperCamelCase__ = "past_encoder_sequence + sequence"
UpperCamelCase__ = {0: "batch"}
UpperCamelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "decoder_sequence"}
UpperCamelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a_, direction="inputs" )
return common_inputs
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 13
| 365
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 0
|
'''simple docstring'''
from math import isqrt
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = False
return [i for i in range(2 , _UpperCamelCase ) if is_prime[i]]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10**8 ) -> int:
'''simple docstring'''
UpperCamelCase__ = calculate_prime_numbers(max_number // 2 )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
| 366
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 0
|
'''simple docstring'''
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
UpperCamelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(_UpperCamelCase , 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
UpperCamelCase__ = (sidea + sidea + sidea) / 2
UpperCamelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 367
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e greater than 9(e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 0
|
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = 'M-CLIP'
def __init__( self : int, a_ : Dict=1024, a_ : Union[str, Any]=768, **a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = transformerDimSize
UpperCamelCase__ = imageDimSize
super().__init__(**a_ )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = MCLIPConfig
def __init__( self : List[str], a_ : Union[str, Any], *a_ : List[Any], **a_ : Tuple ):
"""simple docstring"""
super().__init__(a_, *a_, **a_ )
UpperCamelCase__ = XLMRobertaModel(a_ )
UpperCamelCase__ = torch.nn.Linear(
in_features=config.transformerDimensions, out_features=config.numDims )
def lowercase_ ( self : Optional[Any], a_ : str, a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.transformer(input_ids=a_, attention_mask=a_ )[0]
UpperCamelCase__ = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(a_ ), embs
| 368
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : str=False ) -> Tuple:
'''simple docstring'''
try:
UpperCamelCase__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCamelCase__ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCamelCase__ = strtobool(_UpperCamelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F'If set, {key} must be yes or no.' )
return _value
__lowercase: Union[str, Any] = parse_flag_from_env("RUN_SLOW", default=False)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skip("Test was skipped" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]=None ) -> int:
'''simple docstring'''
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" , _UpperCamelCase ) , F'test requires torch version >= {version}' )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Any:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_UpperCamelCase )
__lowercase: Optional[int] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCamelCase )
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : List[Any] = True
@classmethod
def lowercase_ ( cls : Tuple ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
@classmethod
def lowercase_ ( cls : Dict ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob("**/*" ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(a_ )
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Dict, a_ : Union[mock.Mock, List[mock.Mock]] ):
"""simple docstring"""
UpperCamelCase__ = mocks if isinstance(a_, (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = AcceleratorState()
UpperCamelCase__ = tensor[None].clone().to(state.device )
UpperCamelCase__ = gather(_UpperCamelCase ).cpu()
UpperCamelCase__ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , _UpperCamelCase ):
return False
return True
class UpperCAmelCase :
def __init__( self : Optional[Any], a_ : Optional[Any], a_ : List[Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = returncode
UpperCamelCase__ = stdout
UpperCamelCase__ = stderr
async def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] ) -> int:
'''simple docstring'''
while True:
UpperCamelCase__ = await stream.readline()
if line:
callback(_UpperCamelCase )
else:
break
async def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any=None , _UpperCamelCase : str=None , _UpperCamelCase : int=None , _UpperCamelCase : int=False , _UpperCamelCase : int=False ) -> _RunOutput:
'''simple docstring'''
if echo:
print("\nRunning: " , " ".join(_UpperCamelCase ) )
UpperCamelCase__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCamelCase__ = []
UpperCamelCase__ = []
def tee(_UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : str="" ):
UpperCamelCase__ = line.decode("utf-8" ).rstrip()
sink.append(_UpperCamelCase )
if not quiet:
print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label="stdout:" ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label="stderr:" ) ) ),
] , timeout=_UpperCamelCase , )
return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : str=None , _UpperCamelCase : Optional[Any]=1_80 , _UpperCamelCase : int=False , _UpperCamelCase : Union[str, Any]=True ) -> _RunOutput:
'''simple docstring'''
UpperCamelCase__ = asyncio.get_event_loop()
UpperCamelCase__ = loop.run_until_complete(
_stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) )
UpperCamelCase__ = " ".join(_UpperCamelCase )
if result.returncode > 0:
UpperCamelCase__ = "\n".join(result.stderr )
raise RuntimeError(
F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
F'The combined stderr from workers follows:\n{stderr}' )
return result
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
pass
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any]=False ) -> List[Any]:
'''simple docstring'''
try:
UpperCamelCase__ = subprocess.check_output(_UpperCamelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(_UpperCamelCase , "decode" ):
UpperCamelCase__ = output.decode("utf-8" )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F'Command `{" ".join(_UpperCamelCase )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 369
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 0
|
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : List[str] = BarthezTokenizer
_lowerCamelCase : Optional[int] = BarthezTokenizerFast
_lowerCamelCase : Any = True
_lowerCamelCase : Any = True
def lowercase_ ( self : Any ):
"""simple docstring"""
super().setUp()
UpperCamelCase__ = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname, legacy_format=a_ )
UpperCamelCase__ = tokenizer
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = "<pad>"
UpperCamelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ), a_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ), a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], "<s>" )
self.assertEqual(vocab_keys[1], "<pad>" )
self.assertEqual(vocab_keys[-1], "<mask>" )
self.assertEqual(len(a_ ), 10_1122 )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size, 10_1122 )
@require_torch
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCamelCase__ = [0, 57, 3018, 7_0307, 91, 2]
UpperCamelCase__ = self.tokenizer(
a_, max_length=len(a_ ), padding=a_, truncation=a_, return_tensors="pt" )
self.assertIsInstance(a_, a_ )
self.assertEqual((2, 6), batch.input_ids.shape )
self.assertEqual((2, 6), batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(a_, a_ )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = "I was born in 92000, and this is falsé."
UpperCamelCase__ = tokenizer.tokenize(a_ )
UpperCamelCase__ = rust_tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokenizer.encode(a_, add_special_tokens=a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_, add_special_tokens=a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = tokenizer.encode(a_ )
UpperCamelCase__ = rust_tokenizer.encode(a_ )
self.assertListEqual(a_, a_ )
@slow
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = {"input_ids": [[0, 490, 1_4328, 4507, 354, 47, 4_3669, 95, 25, 7_8117, 2_0215, 1_9779, 190, 22, 400, 4, 3_5343, 8_0310, 603, 86, 2_4937, 105, 3_3438, 9_4762, 196, 3_9642, 7, 15, 1_5933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_0534, 87, 25, 66, 3358, 196, 5_5289, 8, 8_2961, 81, 2204, 7_5203, 7, 15, 763, 1_2956, 216, 178, 1_4328, 9595, 1377, 6_9693, 7, 448, 7_1021, 196, 1_8106, 1437, 1_3974, 108, 9083, 4, 4_9315, 7, 39, 86, 1326, 2793, 4_6333, 4, 448, 196, 7_4588, 7, 4_9315, 7, 39, 21, 822, 3_8470, 74, 21, 6_6723, 6_2480, 8, 2_2050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
UpperCamelCase__ = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=a_, model_name="moussaKam/mbarthez", revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6", sequences=a_, )
| 370
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : int = 0 ) -> list:
'''simple docstring'''
UpperCamelCase__ = length or len(_UpperCamelCase )
UpperCamelCase__ = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
UpperCamelCase__ , UpperCamelCase__ = list_data[i + 1], list_data[i]
UpperCamelCase__ = True
return list_data if not swapped else bubble_sort(_UpperCamelCase , length - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 371
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 0
|
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = (UnCLIPScheduler,)
def lowercase_ ( self : str, **a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
config.update(**a_ )
return config
def lowercase_ ( self : int ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def lowercase_ ( self : int ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=a_, prev_timestep=a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type="fixed_small_log" )
UpperCamelCase__ = scheduler_class(**a_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_0_0_0e-1_0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_549_625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_994_987 ) ) < 1e-5
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(variance_type="learned_range" )
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = 0.5
assert scheduler._get_variance(1, predicted_variance=a_ ) - -10.1_712_790 < 1e-5
assert scheduler._get_variance(487, predicted_variance=a_ ) - -5.7_998_052 < 1e-5
assert scheduler._get_variance(999, predicted_variance=a_ ) - -0.0_010_011 < 1e-5
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(a_ ):
# 1. predict noise residual
UpperCamelCase__ = model(a_, a_ )
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(a_, a_, a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 252.2_682_495 ) < 1e-2
assert abs(result_mean.item() - 0.3_284_743 ) < 1e-3
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
scheduler.set_timesteps(25 )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = torch.manual_seed(0 )
for i, t in enumerate(a_ ):
# 1. predict noise residual
UpperCamelCase__ = model(a_, a_ )
if i + 1 == timesteps.shape[0]:
UpperCamelCase__ = None
else:
UpperCamelCase__ = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
UpperCamelCase__ = scheduler.step(
a_, a_, a_, prev_timestep=a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 258.2_044_983 ) < 1e-2
assert abs(result_mean.item() - 0.3_362_038 ) < 1e-3
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : Tuple ):
"""simple docstring"""
pass
| 350
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 0
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__ ( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 351
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections import Counter
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> typing.Counter[int]:
'''simple docstring'''
UpperCamelCase__ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(_UpperCamelCase , max_perimeter + 1 ):
UpperCamelCase__ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(_UpperCamelCase ):
UpperCamelCase__ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
UpperCamelCase__ = pythagorean_triple(_UpperCamelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(F"""Perimeter {solution()} has maximum solutions""")
| 352
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 0
|
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowercase: Dict = imread(r"digital_image_processing/image_data/lena_small.jpg")
__lowercase: Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
UpperCamelCase__ = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
UpperCamelCase__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__( ) -> Any:
UpperCamelCase__ = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCamelCase__ = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__( ) -> Any:
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__( ) -> int:
UpperCamelCase__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
UpperCamelCase__ = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
assert med.median_filter(_UpperCamelCase , 3 ).any()
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
UpperCamelCase__ , UpperCamelCase__ = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__( ) -> Dict:
UpperCamelCase__ = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
UpperCamelCase__ = bs.Burkes(imread(_UpperCamelCase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Dict:
UpperCamelCase__ = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
UpperCamelCase__ = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCamelCase__ = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = image[x_coordinate][y_coordinate]
UpperCamelCase__ = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase__ = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
| 353
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def SCREAMING_SNAKE_CASE__( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
UpperCamelCase__ = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(_UpperCamelCase )
DownloadCommand.register_subcommand(_UpperCamelCase )
EnvironmentCommand.register_subcommand(_UpperCamelCase )
RunCommand.register_subcommand(_UpperCamelCase )
ServeCommand.register_subcommand(_UpperCamelCase )
UserCommands.register_subcommand(_UpperCamelCase )
AddNewModelCommand.register_subcommand(_UpperCamelCase )
AddNewModelLikeCommand.register_subcommand(_UpperCamelCase )
LfsCommands.register_subcommand(_UpperCamelCase )
PTtoTFCommand.register_subcommand(_UpperCamelCase )
# Let's go
UpperCamelCase__ = parser.parse_args()
if not hasattr(_UpperCamelCase , "func" ):
parser.print_help()
exit(1 )
# Run
UpperCamelCase__ = args.func(_UpperCamelCase )
service.run()
if __name__ == "__main__":
main()
| 354
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEquals(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 0
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 355
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 0
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
UpperCamelCase__ = quote(_UpperCamelCase )
return hfh.hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" , revision=_UpperCamelCase )
| 356
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 0
|
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=_UpperCamelCase , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=_UpperCamelCase , default=5 )
parser.add_argument("--batch_size" , type=_UpperCamelCase , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=_UpperCamelCase , default=1 )
parser.add_argument("--freeze" , type=_UpperCamelCase , default=_UpperCamelCase )
parser.add_argument("--learning_rate" , type=_UpperCamelCase , default=5e-4 )
parser.add_argument("--seed" , type=_UpperCamelCase , default=0 )
parser.add_argument("--lr_scheduler_type" , type=_UpperCamelCase , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=_UpperCamelCase , default=10 )
parser.add_argument("--weight_decay" , type=_UpperCamelCase , default=0.0_1 )
parser.add_argument("--output_dir" , type=_UpperCamelCase , default="./results" )
return parser.parse_args()
__lowercase: Union[str, Any] = load("accuracy")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = eval_pred
UpperCamelCase__ = np.argmax(_UpperCamelCase , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : List[str], a_ : int ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = trainer
def lowercase_ ( self : List[Any], a_ : Union[str, Any], a_ : List[str], a_ : int, **a_ : List[str] ):
"""simple docstring"""
if control.should_evaluate:
UpperCamelCase__ = deepcopy(a_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset, metric_key_prefix="train" )
return control_copy
def SCREAMING_SNAKE_CASE__( ) -> int:
'''simple docstring'''
UpperCamelCase__ = get_args()
set_seed(args.seed )
UpperCamelCase__ = load_dataset("codeparrot/codecomplex" , split="train" )
UpperCamelCase__ = dataset.train_test_split(test_size=0.2 )
UpperCamelCase__ = train_test["test"].train_test_split(test_size=0.5 )
UpperCamelCase__ = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
UpperCamelCase__ = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCamelCase__ = tokenizer.eos_token
UpperCamelCase__ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
UpperCamelCase__ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
UpperCamelCase__ = False
UpperCamelCase__ = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(_UpperCamelCase : Tuple ):
UpperCamelCase__ = tokenizer(example["src"] , truncation=_UpperCamelCase , max_length=10_24 )
UpperCamelCase__ = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
UpperCamelCase__ = train_test_validation.map(
_UpperCamelCase , batched=_UpperCamelCase , remove_columns=train_test_validation["train"].column_names , )
UpperCamelCase__ = DataCollatorWithPadding(tokenizer=_UpperCamelCase )
UpperCamelCase__ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.0_1 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , compute_metrics=_UpperCamelCase , )
print("Training..." )
trainer.add_callback(CustomCallback(_UpperCamelCase ) )
trainer.train()
if __name__ == "__main__":
main()
| 357
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase: Optional[Any] = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
__lowercase: Optional[int] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
__lowercase: str = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
__lowercase: List[Any] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
__lowercase: List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 358
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.