"""CLI command that runs a pipeline over an input file (``transformers-cli run``)."""
from argparse import ArgumentParser

from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
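
# Usage sketch (not part of the original file): `run_command_factory` is the callback
# that argparse invokes via `set_defaults(func=...)`. Wiring it up by hand would look
# roughly like this; the Namespace fields mirror the --task/--input/... arguments above.
#
#   from argparse import Namespace
#   cmd = run_command_factory(
#       Namespace(task="text-classification", model=None, config=None, tokenizer=None,
#                 device=-1, format="infer", input="reviews.csv", output="results.csv",
#                 column="text", overwrite=False)
#   )
#   cmd.run()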
"""Validate credit card numbers with prefix/length checks and the Luhn algorithm."""


def validate_initial_digits(credit_card_number: str) -> bool:
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    validate_credit_card_number("4111111111111111")
    validate_credit_card_number("32323")
"""Report self-hosted GitHub Actions runners that are offline."""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
"""Compute the missing carrier concentration in a semiconductor via the mass-action law."""
from __future__ import annotations


def carrier_concentration(
    electron_conc: float,
    hole_conc: float,
    intrinsic_conc: float,
) -> tuple:
    if (electron_conc, hole_conc, intrinsic_conc).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif electron_conc < 0:
        raise ValueError("Electron concentration cannot be negative in a semiconductor")
    elif hole_conc < 0:
        raise ValueError("Hole concentration cannot be negative in a semiconductor")
    elif intrinsic_conc < 0:
        raise ValueError("Intrinsic concentration cannot be negative in a semiconductor")
    elif electron_conc == 0:
        return (
            "electron_conc",
            intrinsic_conc**2 / hole_conc,
        )
    elif hole_conc == 0:
        return (
            "hole_conc",
            intrinsic_conc**2 / electron_conc,
        )
    elif intrinsic_conc == 0:
        return (
            "intrinsic_conc",
            (electron_conc * hole_conc) ** 0.5,
        )
    else:
        return (-1, -1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
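
    # Example (added for illustration): with a zero intrinsic concentration, the function
    # returns the mass-action value n_i = sqrt(n * p).
    assert carrier_concentration(electron_conc=25, hole_conc=100, intrinsic_conc=0) == (
        "intrinsic_conc",
        50.0,
    )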
"""Lazy import structure for the AltCLIP model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


_import_structure = {
    "configuration_altclip": [
        "ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AltCLIPConfig",
        "AltCLIPTextConfig",
        "AltCLIPVisionConfig",
    ],
    "processing_altclip": ["AltCLIPProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_altclip"] = [
        "ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AltCLIPPreTrainedModel",
        "AltCLIPModel",
        "AltCLIPTextModel",
        "AltCLIPVisionModel",
    ]


if TYPE_CHECKING:
    from .configuration_altclip import (
        ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        AltCLIPConfig,
        AltCLIPTextConfig,
        AltCLIPVisionConfig,
    )
    from .processing_altclip import AltCLIPProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_altclip import (
            ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            AltCLIPModel,
            AltCLIPPreTrainedModel,
            AltCLIPTextModel,
            AltCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Lazy import structure for the Time Series Transformer model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Minimal M-CLIP wrapper: an XLM-R encoder with a linear projection into CLIP space."""
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # mean-pool the token embeddings, weighted by the attention mask
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
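
# Usage sketch (illustrative; assumes a compatible tokenizer checkpoint is available):
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   batch = tok(["hello world"], return_tensors="pt")
#   projected, raw = model(batch["input_ids"], batch["attention_mask"])
#   # `projected` lives in the CLIP image-embedding space; `raw` is the XLM-R output.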
"""Length of the longest strictly increasing subsequence in O(n log n)."""
from __future__ import annotations


def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length


if __name__ == "__main__":
    import doctest

    doctest.testmod()
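
    # Example (added for illustration): one longest strictly increasing subsequence of
    # the list below is [2, 3, 7, 8, 10, 13], so the length is 6.
    assert longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6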
"""Tests for the TensorFlow DeBERTa-v2 models."""
from __future__ import annotations

import unittest

from transformers import DebertaV2Config, is_tf_available
from transformers.testing_utils import require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TFDebertaV2ForMaskedLM,
        TFDebertaV2ForQuestionAnswering,
        TFDebertaV2ForSequenceClassification,
        TFDebertaV2ForTokenClassification,
        TFDebertaV2Model,
    )


class TFDebertaV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = DebertaV2Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2Model(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDebertaV2ForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDebertaV2ForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class TFDebertaV2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDebertaV2Model,
            TFDebertaV2ForMaskedLM,
            TFDebertaV2ForQuestionAnswering,
            TFDebertaV2ForSequenceClassification,
            TFDebertaV2ForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDebertaV2Model,
            "fill-mask": TFDebertaV2ForMaskedLM,
            "question-answering": TFDebertaV2ForQuestionAnswering,
            "text-classification": TFDebertaV2ForSequenceClassification,
            "token-classification": TFDebertaV2ForTokenClassification,
            "zero-shot": TFDebertaV2ForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        self.assertIsNotNone(model)


@require_tf
class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
        input_ids = tf.constant([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]

        expected_slice = tf.constant(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        tf.debugging.assert_near(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
"""Dummy object that raises an informative error when torch/scipy are missing."""
from ..utils import DummyObject, requires_backends


class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
"""Convert a GPTSAN TensorFlow checkpoint to a PyTorch state dict."""
import argparse
import json
import os
from collections import OrderedDict

import numpy as np
import tensorflow as tf
import torch


def convert_tf_gptsan_to_pt(args):
    parameter_file = os.path.join(args.tf_model_dir, "parameters.json")
    params = json.loads(open(parameter_file).read())
    if not params:
        raise ValueError(
            f"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file."
        )
    if not args.output.endswith(".pt"):
        args.output = args.output + ".pt"
    new_state = OrderedDict()
    with tf.device("/CPU:0"):
        reader = tf.train.load_checkpoint(args.tf_model_dir)
        shapes = reader.get_variable_to_shape_map()
        for key_name in shapes.keys():
            vnp = reader.get_tensor(key_name).astype(np.float16)
            # skip Adam optimizer slots
            if key_name.endswith("/adam_m") or key_name.endswith("/adam_v"):
                continue
            if key_name.startswith("pasts/"):
                if key_name.startswith("pasts/mlp"):
                    player = int(key_name[9])
                elif key_name.startswith("pasts/out"):
                    player = 8
                name = "model.sqout.%d.weight" % (player * 2)  # enters an nn.Sequential with Tanh, so 2 at a time
                state = vnp.transpose([1, 0]).copy()  # Mesh-TensorFlow kernels are transposed relative to PyTorch
                new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/moe"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/switch_gating/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/softmlp/kernel"):
                    name = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/wo/kernel") or key_name.endswith("/wi/kernel"):
                    nlayer = key_name[-9:-7]
                    for i in range(16):
                        name = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
                        state = (
                            vnp[i].transpose([1, 0]).copy()
                        )  # in Mesh-TensorFlow all experts share one array, so it is split here
                        new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/mlp"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/p1/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p1/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
                    state = vnp.copy()  # one-dimensional, no transpose needed
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/kernel"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
                    state = vnp.transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/p2/bias"):
                    name = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/ln"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.feed_forward.norm.bias" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.feed_forward.norm.weight" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/att"):
                player = int(key_name[9:].split("/")[0])
                if key_name.endswith("/qkv/kernel"):
                    state = vnp.copy()  # compute the same dimensions as Mesh-TensorFlow using einsum
                    state_q = state[:, 0, :, :]
                    state_k = state[:, 1, :, :]
                    state_v = state[:, 2, :, :]
                    state_q = (
                        state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    state_k = (
                        state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    state_v = (
                        state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
                        .transpose([1, 0])
                        .copy()
                    )
                    name = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
                    new_state[name] = torch.tensor(state_q)
                    name = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
                    new_state[name] = torch.tensor(state_k)
                    name = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
                    new_state[name] = torch.tensor(state_v)
                elif key_name.endswith("/o/kernel"):
                    name = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
                    state = vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/an"):
                player = int(key_name[8:].split("/")[0])
                if key_name.endswith("/b"):
                    name = "model.blocks.%d.self_attn.norm.bias" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
                elif key_name.endswith("/g"):
                    name = "model.blocks.%d.self_attn.norm.weight" % player
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif (
                key_name.startswith("model/wte")
                or key_name.startswith("model/wpe")
                or key_name.startswith("model/ete")
            ):
                nlayer = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
                    key_name[-3:]
                ]
                name = "model.%s.weight" % nlayer
                state = vnp.copy()  # embeddings are copied unchanged
                new_state[name] = torch.tensor(state)
                if key_name.startswith("model/wte"):
                    name = "lm_head.weight"
                    state = vnp.copy()
                    new_state[name] = torch.tensor(state)
            elif key_name.startswith("model/wob"):
                name = "final_logits_bias"
                state = vnp.copy()
                state = state.reshape((1, -1))
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense/kernel":
                name = "model.last_project.weight"
                state = vnp.transpose([1, 0]).copy()
                new_state[name] = torch.tensor(state)
            elif key_name == "model/dense_1/bias":
                name = "model.last_project.bias"
                state = vnp.copy()
                new_state[name] = torch.tensor(state)
    torch.save(new_state, args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
    parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
    args = parser.parse_args()
    convert_tf_gptsan_to_pt(args)
"""GPT-NeoX-Japanese model configuration."""
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
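
# Usage sketch (hyperparameter values here are illustrative, not a released checkpoint):
#   config = GPTNeoXJapaneseConfig(hidden_size=1024, num_hidden_layers=12)
#   config.hidden_size        # 1024
#   config.to_json_string()   # serializable like any PretrainedConfig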
"""Project Euler 59: break a repeating three-letter lowercase XOR cipher."""
from __future__ import annotations

import string
from itertools import cycle, product
from pathlib import Path

VALID_CHARS = (
    string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
LOWERCASE_INTS = [ord(letter) for letter in string.ascii_lowercase]
VALID_INTS = {ord(char) for char in VALID_CHARS}
COMMON_WORDS = ["the", "be", "to", "of", "and", "in", "that", "have"]


def try_key(ciphertext: list[int], key: tuple[int, ...]) -> str | None:
    decoded = ""
    keychar: int
    cipherchar: int
    decodedchar: int

    for keychar, cipherchar in zip(cycle(key), ciphertext):
        decodedchar = cipherchar ^ keychar
        if decodedchar not in VALID_INTS:
            return None
        decoded += chr(decodedchar)

    return decoded


def filter_valid_chars(ciphertext: list[int]) -> list[str]:
    possibles = []
    for key in product(LOWERCASE_INTS, repeat=3):
        encoded = try_key(ciphertext, key)
        if encoded is not None:
            possibles.append(encoded)
    return possibles


def filter_common_word(possibles: list[str], common_word: str) -> list[str]:
    return [possible for possible in possibles if common_word in possible.lower()]


def solution(filename: str = "p059_cipher.txt") -> int:
    ciphertext: list[int]
    possibles: list[str]
    common_word: str
    decoded_text: str

    data = Path(__file__).parent.joinpath(filename).read_text(encoding="utf-8")

    ciphertext = [int(number) for number in data.strip().split(",")]

    possibles = filter_valid_chars(ciphertext)
    for common_word in COMMON_WORDS:
        possibles = filter_common_word(possibles, common_word)
        if len(possibles) == 1:
            break

    decoded_text = possibles[0]
    return sum(ord(char) for char in decoded_text)


if __name__ == "__main__":
    print(f"{solution() = }")
"""Speed of sound in a fluid from its density and bulk modulus."""


def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
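
    # Example (added for illustration): water at roughly 998 kg/m^3 with a bulk modulus
    # of about 2.15e9 Pa gives a speed of sound near 1468 m/s.
    print(f"{speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9):.0f} m/s")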
"""Simple LZ-style bit-level file compression with a growing dictionary."""
import math
import os
import sys


def read_file_binary(file_path: str) -> str:
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict[str, str], curr_string: str, index: int, last_match_id: str) -> None:
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
"""Generate the first n terms of a p-series: 1 + 1/2^p + 1/3^p + ... + 1/n^p."""
from __future__ import annotations


def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
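
    # Example (added for illustration): the first five terms of the p-series with p = 2.
    assert p_series(5, 2) == ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"]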
"""Convert DiT checkpoints from the unilm repository to the HF BEiT format."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, has_lm_head=False, is_semantic=False):
    prefix = "backbone." if is_semantic else ""

    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"{prefix}blocks.{i}.norm1.weight", f"beit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm1.bias", f"beit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.weight", f"beit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"{prefix}blocks.{i}.attn.proj.bias", f"beit.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"{prefix}blocks.{i}.norm2.weight", f"beit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.norm2.bias", f"beit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.weight", f"beit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc1.bias", f"beit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.weight", f"beit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"{prefix}blocks.{i}.mlp.fc2.bias", f"beit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            (f"{prefix}cls_token", "beit.embeddings.cls_token"),
            (f"{prefix}patch_embed.proj.weight", "beit.embeddings.patch_embeddings.projection.weight"),
            (f"{prefix}patch_embed.proj.bias", "beit.embeddings.patch_embeddings.projection.bias"),
            (f"{prefix}pos_embed", "beit.embeddings.position_embeddings"),
        ]
    )

    if has_lm_head:
        # mask token + layernorm
        rename_keys.extend(
            [
                ("mask_token", "beit.embeddings.mask_token"),
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("fc_norm.weight", "beit.pooler.layernorm.weight"),
                ("fc_norm.bias", "beit.pooler.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, has_lm_head=False, is_semantic=False):
    for i in range(config.num_hidden_layers):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f"{prefix}blocks.{i}.attn.qkv.weight")
        q_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"{prefix}blocks.{i}.attn.v_bias")

        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.query.bias"] = q_bias
        state_dict[f"beit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"beit.encoder.layer.{i}.attention.attention.value.bias"] = v_bias

        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f"{prefix}blocks.{i}.gamma_1")
        gamma_2 = state_dict.pop(f"{prefix}blocks.{i}.gamma_2")
        state_dict[f"beit.encoder.layer.{i}.lambda_1"] = gamma_1
        state_dict[f"beit.encoder.layer.{i}.lambda_2"] = gamma_2


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    # define default BEiT configuration
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    args = parser.parse_args()
    convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
"""``datasets`` task template for audio classification."""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate


@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
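
# Usage sketch (illustrative label names): aligning the template against concrete
# dataset features replaces the bare ClassLabel with the dataset's own label feature.
#   features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
#   task = AudioClassification().align_with_features(features)
#   task.label_schema["labels"].names  # ["dog", "cat"]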
"""Lazy import structure for the CLIP model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_clip import (
        CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPConfig,
        CLIPOnnxConfig,
        CLIPTextConfig,
        CLIPVisionConfig,
    )
    from .processing_clip import CLIPProcessor
    from .tokenization_clip import CLIPTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_clip_fast import CLIPTokenizerFast

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clip import CLIPFeatureExtractor
        from .image_processing_clip import CLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clip import (
            CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPModel,
            CLIPPreTrainedModel,
            CLIPTextModel,
            CLIPTextModelWithProjection,
            CLIPVisionModel,
            CLIPVisionModelWithProjection,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_clip import (
            TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCLIPModel,
            TFCLIPPreTrainedModel,
            TFCLIPTextModel,
            TFCLIPVisionModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_clip import (
            FlaxCLIPModel,
            FlaxCLIPPreTrainedModel,
            FlaxCLIPTextModel,
            FlaxCLIPTextPreTrainedModel,
            FlaxCLIPVisionModel,
            FlaxCLIPVisionPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
# fmt: off
UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 1
|
'''simple docstring'''
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class UpperCAmelCase :
@staticmethod
def lowercase_ ( *a_ : Any, **a_ : Any ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
class UpperCAmelCase ( unittest.TestCase):
@require_torch
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", )
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__ = image_classifier(a_, candidate_labels=["a", "b", "c"] )
# The scores are so close that floating-point error makes the ordering
# non-deterministic across Python and torch versions.
self.assertIn(
nested_simplify(a_ ), [
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}],
[{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}],
], )
UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(a_ ), [
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
], )
@require_tf
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" )
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__ = image_classifier(a_, candidate_labels=["a", "b", "c"] )
self.assertEqual(
nested_simplify(a_ ), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], )
UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2 )
self.assertEqual(
nested_simplify(a_ ), [
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
[
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
{"score": 0.333, "label": ANY(a_ )},
],
], )
@slow
@require_torch
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__ = image_classifier(a_, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(a_ ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(a_ ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
@slow
@require_tf
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = pipeline(
task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" )
# This is an image of 2 cats with remotes and no planes
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
UpperCamelCase__ = image_classifier(a_, candidate_labels=["cat", "plane", "remote"] )
self.assertEqual(
nested_simplify(a_ ), [
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
], )
UpperCamelCase__ = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2 )
self.assertEqual(
nested_simplify(a_ ), [
[
{"score": 0.511, "label": "remote"},
{"score": 0.485, "label": "cat"},
{"score": 0.004, "label": "plane"},
],
]
* 5, )
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
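# Illustrative note (not in the original module): column_reshape turns a flat
# vector into an explicit column vector, e.g.
# >>> column_reshape(np.array([1, 2, 3])).shape
# (3, 1)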
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Center the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# After the first class, accumulate into the running sum
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# First class: covariance_sum is still np.nan, so initialize it
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
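# Sketch of what the function above computes (my reading of the code, not a
# statement from the source): the within-class scatter
# S_W = (1/N) * sum_i X_i_c @ X_i_c.T, where X_i_c is the column-centered data
# of class i and N the total sample count, e.g.
# >>> feats = np.array([[1.0, 2.0, 3.0, 4.0], [0.0, 1.0, 1.0, 2.0]])
# >>> labs = np.array([0, 0, 1, 1])
# >>> covariance_within_classes(feats, labs, 2).shape
# (2, 2)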
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# After the first class, accumulate into the running sum
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# First class: covariance_sum is still np.nan, so initialize it
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take the eigenvector columns in reverse order (largest eigenvalue first), then keep only the first `dimensions` columns
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
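# Minimal usage sketch (assumed calling convention: rows are features, columns
# are samples; `dimensions` is the number of principal components to keep):
# >>> demo = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 4.0, 6.0, 8.0]])
# >>> principal_component_analysis(demo, 1).shape
# (1, 4)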
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowercase: List[str] = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[Any] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowercase: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__lowercase: List[Any] = logging.get_logger(__name__)
__lowercase: Dict = {
"openai/whisper-base": "https://huggingface.co/openai/whisper-base/resolve/main/config.json",
}
# fmt: off
__lowercase: Dict = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__lowercase: Union[str, Any] = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
# fmt: on
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'whisper'
_lowerCamelCase : Tuple = ['past_key_values']
_lowerCamelCase : str = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : List[str], a_ : int=5_1865, a_ : Dict=80, a_ : int=6, a_ : Tuple=4, a_ : Dict=6, a_ : str=4, a_ : Optional[Any]=1536, a_ : Optional[Any]=1536, a_ : List[Any]=0.0, a_ : str=0.0, a_ : Union[str, Any]=5_0257, a_ : Any=True, a_ : List[str]=True, a_ : Any="gelu", a_ : Union[str, Any]=256, a_ : Dict=0.0, a_ : Union[str, Any]=0.0, a_ : List[Any]=0.0, a_ : str=0.02, a_ : int=False, a_ : str=1500, a_ : List[str]=448, a_ : List[Any]=5_0256, a_ : Dict=5_0256, a_ : Tuple=5_0256, a_ : Tuple=None, a_ : Tuple=[220, 5_0256], a_ : List[str]=False, a_ : int=256, a_ : int=False, a_ : List[str]=0.05, a_ : Union[str, Any]=10, a_ : List[Any]=2, a_ : Dict=0.0, a_ : Any=10, a_ : Any=0, a_ : str=7, **a_ : Union[str, Any], ):
"""simple docstring"""
UpperCamelCase__ = vocab_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = d_model
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = encoder_attention_heads
UpperCamelCase__ = decoder_layers
UpperCamelCase__ = decoder_attention_heads
UpperCamelCase__ = decoder_ffn_dim
UpperCamelCase__ = encoder_ffn_dim
UpperCamelCase__ = dropout
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = activation_dropout
UpperCamelCase__ = activation_function
UpperCamelCase__ = init_std
UpperCamelCase__ = encoder_layerdrop
UpperCamelCase__ = decoder_layerdrop
UpperCamelCase__ = use_cache
UpperCamelCase__ = encoder_layers
UpperCamelCase__ = scale_embedding # scale factor will be sqrt(d_model) if True
UpperCamelCase__ = max_source_positions
UpperCamelCase__ = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
UpperCamelCase__ = classifier_proj_size
UpperCamelCase__ = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase__ = apply_spec_augment
UpperCamelCase__ = mask_time_prob
UpperCamelCase__ = mask_time_length
UpperCamelCase__ = mask_time_min_masks
UpperCamelCase__ = mask_feature_prob
UpperCamelCase__ = mask_feature_length
UpperCamelCase__ = mask_feature_min_masks
UpperCamelCase__ = median_filter_width
super().__init__(
pad_token_id=a_, bos_token_id=a_, eos_token_id=a_, is_encoder_decoder=a_, decoder_start_token_id=a_, suppress_tokens=a_, begin_suppress_tokens=a_, **a_, )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = OrderedDict(
[
("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}),
] )
if self.use_past:
UpperCamelCase__ = {0: "batch"}
else:
UpperCamelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(a_, direction="inputs" )
return common_inputs
def lowercase_ ( self : Union[str, Any], a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional["TensorType"] = None, a_ : int = 2_2050, a_ : float = 5.0, a_ : int = 220, ):
"""simple docstring"""
UpperCamelCase__ = OrderedDict()
UpperCamelCase__ = OnnxConfig.generate_dummy_inputs(
self, preprocessor=preprocessor.feature_extractor, batch_size=a_, framework=a_, sampling_rate=a_, time_duration=a_, frequency=a_, )
UpperCamelCase__ = encoder_inputs["input_features"].shape[2]
UpperCamelCase__ = encoder_sequence_length // 2 if self.use_past else seq_length
UpperCamelCase__ = super().generate_dummy_inputs(
preprocessor.tokenizer, a_, a_, a_, a_ )
UpperCamelCase__ = encoder_inputs.pop("input_features" )
UpperCamelCase__ = decoder_inputs.pop("decoder_input_ids" )
if "past_key_values" in decoder_inputs:
UpperCamelCase__ = decoder_inputs.pop("past_key_values" )
return dummy_inputs
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 1e-3
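# Note: in the upstream Whisper ONNX config this property is named
# `atol_for_validation`: the absolute tolerance used when comparing the
# ONNX-exported outputs against the PyTorch reference.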
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__lowercase: str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Tuple = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__lowercase: int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 16_000 ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
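# Usage sketch (illustrative, not part of the original script): crop a random
# 2-second window out of a longer waveform at a 16 kHz sampling rate.
# >>> wav = np.zeros(80_000, dtype=np.float32)  # 5 s of silence
# >>> random_subsample(wav, 2.0, 16_000).shape
# (32000,)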
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , label2id=_UpperCamelCase , id2label=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__lowercase: List[str] = logging.get_logger(__name__)
__lowercase: Dict = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__lowercase: str = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
__lowercase: Optional[Any] = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
__lowercase: Optional[int] = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
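# Usage sketch (illustrative, not part of the original module). Assuming the
# transformers package is installed, the fast tokenizer above is typically
# exercised like this; the checkpoint name comes from the maps above:
#
#     from transformers import DistilBertTokenizerFast
#
#     tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert-base-uncased")
#     encoded = tokenizer("Hello world!", return_tensors="pt")
#     print(encoded["input_ids"], encoded["attention_mask"])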
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """
    Decompresses given data_bits using the Lempel-Ziv-Welch algorithm
    and returns the result as a string
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        # Once the lexicon size reaches a power of two, every existing key
        # needs one more leading bit to stay unambiguous.
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """
    Removes the size prefix that a compressed file carries and returns the rest
    """
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def compress(source_path: str, destination_path: str) -> None:
    """
    Reads source file, decompresses it and writes the result in destination file
    """
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
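# Usage sketch (illustrative): the script is meant to be invoked from the
# command line with a compressed source path and a destination path; the file
# names below are hypothetical placeholders.
#
#     python lempel_ziv_decompress.py compressed.lz restored.txt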
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    "A callback that registers the events that goes through."

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model,
            args,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append("on_epoch_begin")
            for _ in range(train_dl_len):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append("on_log")
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append("on_save")
            expected_events.append("on_epoch_end")
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback],
            logging_steps=3,
            save_steps=10,
            eval_steps=5,
            evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
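# For context (illustrative, mirrors the pattern exercised by the test above):
# a custom callback only subclasses TrainerCallback and overrides the hooks it
# cares about, then is handed to the Trainer. The class below is a hypothetical
# example, not part of the test suite.
#
#     class LossPrinterCallback(TrainerCallback):
#         def on_log(self, args, state, control, logs=None, **kwargs):
#             if logs and "loss" in logs:
#                 print(f"step {state.global_step}: loss = {logs['loss']}")
#
#     trainer = Trainer(model, training_args, callbacks=[LossPrinterCallback()])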
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
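# Usage sketch (illustrative): the pipeline is normally loaded from a
# pretrained checkpoint rather than assembled by hand. The checkpoint name
# below is an assumption about what is available on the Hub.
#
#     from diffusers import LDMPipeline
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(num_inference_steps=50).images[0]
#     image.save("sample.png")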
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=0.00001,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
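# Usage sketch (illustrative): the composite config can be assembled from its
# three sub-configs via the classmethod above; OPTConfig is the default text
# backbone named in the log message.
#
#     from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
#
#     config = Blip2Config.from_vision_qformer_text_configs(
#         vision_config=Blip2VisionConfig(),
#         qformer_config=Blip2QFormerConfig(),
#         text_config=OPTConfig(),
#     )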
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
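# Usage sketch (illustrative): the script is meant to be run in CI with a
# comma-separated runner list and a GitHub token that has actions:read scope.
# The script file name and runner names are placeholders.
#
#     python check_self_hosted_runner.py --target_runners runner-1,runner-2 --token "$GITHUB_TOKEN"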
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNext does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNext does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNext does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.0260, -0.4739, 0.1911]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class ConvNextBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextModelTester(self)
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
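# Usage sketch (illustrative): both helpers expect datasets of one type only
# (all map-style or all iterable). A minimal map-style example:
#
#     from datasets import Dataset
#
#     d1 = Dataset.from_dict({"a": [0, 1, 2]})
#     d2 = Dataset.from_dict({"a": [10, 11, 12]})
#     mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
#     stacked = concatenate_datasets([d1, d2])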
'''simple docstring'''
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

            # a single output layer covers the full vocabulary when div_val == 1
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
def lowercase_ ( self : Union[str, Any], a_ : Union[str, Any], a_ : List[Any]=None, a_ : List[Any]=False ):
"""simple docstring"""
if labels is not None:
# Shift so that tokens < n predict n
UpperCamelCase__ = hidden[..., :-1, :].contiguous()
UpperCamelCase__ = labels[..., 1:].contiguous()
UpperCamelCase__ = hidden.view(-1, hidden.size(-1 ) )
UpperCamelCase__ = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("Input and labels should have the same size in the batch dimension." )
else:
UpperCamelCase__ = hidden.view(-1, hidden.size(-1 ) )
if self.n_clusters == 0:
UpperCamelCase__ = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
if labels is not None:
UpperCamelCase__ = labels != -100
UpperCamelCase__ = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device )
UpperCamelCase__ = (
-nn.functional.log_softmax(a_, dim=-1 )[mask].gather(1, labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
UpperCamelCase__ = nn.functional.log_softmax(a_, dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ = self.out_layers[i].weight
UpperCamelCase__ = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ = torch.cat([weight_i, self.cluster_weight], dim=0 )
UpperCamelCase__ = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(a_ )
biases.append(a_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ = self._compute_logit(a_, a_, a_, a_ )
UpperCamelCase__ = nn.functional.log_softmax(a_, dim=1 )
if labels is None:
UpperCamelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
UpperCamelCase__ = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device )
UpperCamelCase__ = 0
UpperCamelCase__ = [0] + self.cutoffs
for i in range(len(a_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
UpperCamelCase__ = (labels >= l_idx) & (labels < r_idx)
UpperCamelCase__ = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
UpperCamelCase__ = labels.index_select(0, a_ ) - l_idx
UpperCamelCase__ = head_logprob.index_select(0, a_ )
UpperCamelCase__ = hidden.index_select(0, a_ )
else:
UpperCamelCase__ = hidden
if i == 0:
if labels is not None:
UpperCamelCase__ = head_logprob_i.gather(1, target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ = self._compute_logit(a_, a_, a_, a_ )
UpperCamelCase__ = nn.functional.log_softmax(a_, dim=1 )
UpperCamelCase__ = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
UpperCamelCase__ = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None] ).squeeze(1 )
else:
UpperCamelCase__ = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
UpperCamelCase__ = logprob_i
if labels is not None:
if (hasattr(self, "keep_order" ) and self.keep_order) or keep_order:
out.index_copy_(0, a_, -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def lowercase_ ( self : Tuple, a_ : Optional[Any] ):
"""simple docstring"""
if self.n_clusters == 0:
UpperCamelCase__ = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
return nn.functional.log_softmax(a_, dim=-1 )
else:
# construct weights and biases
UpperCamelCase__ , UpperCamelCase__ = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
UpperCamelCase__ , UpperCamelCase__ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCamelCase__ = self.out_layers[0].weight[l_idx:r_idx]
UpperCamelCase__ = self.out_layers[0].bias[l_idx:r_idx]
else:
UpperCamelCase__ = self.out_layers[i].weight
UpperCamelCase__ = self.out_layers[i].bias
if i == 0:
UpperCamelCase__ = torch.cat([weight_i, self.cluster_weight], dim=0 )
UpperCamelCase__ = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(a_ )
biases.append(a_ )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = weights[0], biases[0], self.out_projs[0]
UpperCamelCase__ = self._compute_logit(a_, a_, a_, a_ )
UpperCamelCase__ = hidden.new_empty((head_logit.size(0 ), self.n_token) )
UpperCamelCase__ = nn.functional.log_softmax(a_, dim=1 )
UpperCamelCase__ = [0] + self.cutoffs
for i in range(len(a_ ) - 1 ):
UpperCamelCase__ , UpperCamelCase__ = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
UpperCamelCase__ = head_logprob[:, : self.cutoffs[0]]
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = weights[i], biases[i], self.out_projs[i]
UpperCamelCase__ = self._compute_logit(a_, a_, a_, a_ )
UpperCamelCase__ = nn.functional.log_softmax(a_, dim=1 )
UpperCamelCase__ = head_logprob[:, -i] + tail_logprob_i
UpperCamelCase__ = logprob_i
return out
'''simple docstring'''
def optimal_merge_pattern(files: list) -> float:
    """Merge all the given files at the minimum total cost."""
    optimal_merge_cost = 0
    while len(files) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2):
            min_index = files.index(min(files))
            temp += files[min_index]
            files.pop(min_index)
        files.append(temp)
        optimal_merge_cost += temp
    return optimal_merge_cost
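# Worked example: for files [2, 3, 4] the two cheapest files are merged first
# (2 + 3 = 5, running cost 5), then the result is merged with the remaining
# file (5 + 4 = 9, running cost 14), so:
#
#     >>> optimal_merge_pattern([2, 3, 4])
#     14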
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree to iterate through each branch using DFS.
    It terminates when it reaches the end of the given sequence.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
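# For a quick sanity check: generate_all_permutations([1, 2]) prints each
# branch of the state space tree in DFS order, i.e.
#
#     [1, 2]
#     [2, 1]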
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
__lowercase: Any = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str=None ) -> Any:
'''simple docstring'''
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser("tpu-config" , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser("Accelerate tpu-config command" , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
"Config Arguments" , "Arguments that can be configured through `accelerate config`." )
config_args.add_argument(
"--config_file" , type=_UpperCamelCase , default=_UpperCamelCase , help="Path to the config file to use for accelerate." , )
config_args.add_argument(
"--tpu_name" , default=_UpperCamelCase , help="The name of the TPU to use. If not specified, will use the TPU specified in the config file." , )
config_args.add_argument(
"--tpu_zone" , default=_UpperCamelCase , help="The zone of the TPU to use. If not specified, will use the zone specified in the config file." , )
UpperCamelCase__ = parser.add_argument_group("TPU Arguments" , "Arguments for options ran inside the TPU." )
pod_args.add_argument(
"--use_alpha" , action="store_true" , help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`." , )
pod_args.add_argument(
"--command_file" , default=_UpperCamelCase , help="The path to the file containing the commands to run on the pod on startup." , )
pod_args.add_argument(
"--command" , action="append" , nargs="+" , help="A command to run on the pod. Can be passed multiple times." , )
pod_args.add_argument(
"--install_accelerate" , action="store_true" , help="Whether to install accelerate on the pod. Defaults to False." , )
pod_args.add_argument(
"--accelerate_version" , default="latest" , help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub." , )
pod_args.add_argument(
"--debug" , action="store_true" , help="If set, will print the command that would be run instead of running it." )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()

    tpu_command_launcher(args)
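# Usage sketch (illustrative): once wired into the `accelerate` CLI, a typical
# invocation runs a command on every worker of a named TPU pod. The TPU name
# and zone are placeholders; --debug only prints the gcloud command.
#
#     accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#         --command "echo hello" --debug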
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sijunhe/nezha-cn-base": "https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json",
}


class NezhaConfig(PretrainedConfig):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = "nezha"

    def __init__(
        self,
        vocab_size=21128,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        max_relative_position=64,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout=0.1,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
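# Usage sketch (illustrative): like any PretrainedConfig subclass, the config
# can be instantiated with overrides and round-tripped to disk. The local path
# is a hypothetical placeholder.
#
#     config = NezhaConfig(num_hidden_layers=6)
#     config.save_pretrained("./nezha-small")
#     reloaded = NezhaConfig.from_pretrained("./nezha-small")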
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def load_pytorch_checkpoint_in_flax_state_dict(
    flax_model, pytorch_checkpoint_path, is_sharded, allow_missing_keys=False
):
    """Load pytorch checkpoints in a flax model"""
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    if not is_sharded:
        pt_path = os.path.abspath(pytorch_checkpoint_path)
        logger.info(f"Loading PyTorch weights from {pt_path}")

        pt_state_dict = torch.load(pt_path, map_location="cpu")
        logger.info(f"PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values()):,} parameters.")

        flax_state_dict = convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model)
    else:
        # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
        flax_state_dict = convert_pytorch_sharded_state_dict_to_flax(pytorch_checkpoint_path, flax_model)
    return flax_state_dict
def rename_key_and_reshape_tensor(
    pt_tuple_key: Tuple[str],
    pt_tensor: np.ndarray,
    random_flax_state_dict: Dict[str, jnp.ndarray],
    model_prefix: str,
) -> (Tuple[str], np.ndarray):
    """Rename PT weight names to corresponding Flax weight names and reshape tensor if necessary"""

    def is_key_or_prefix_key_in_dict(key: Tuple[str]) -> bool:
        """Checks if `key` or `(model_prefix,) + key` is in random_flax_state_dict"""
        return len(set(random_flax_state_dict) & {key, (model_prefix,) + key}) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
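# Illustrative note (added, not in the original): a PyTorch nn.Linear(in_features=4, out_features=8)
# stores its "weight" with shape (8, 4); the linear-layer branch above renames it to "kernel" and
# transposes it to (4, 8), which is the layout flax.linen.Dense expects.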
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
        # load the shard with torch.load (these are PyTorch pickle shards, not msgpack files)
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 1
|
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key , default=False ):
    '''simple docstring'''
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F'If set, {key} must be yes or no.' )
    return _value
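# Illustrative usage (assumed, not in the original): with RUN_SLOW=yes exported in the
# environment, parse_flag_from_env("RUN_SLOW") returns 1; when the variable is unset, it
# falls back to `default`.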
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skip("Test was skipped" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> str:
'''simple docstring'''
return unittest.skipUnless(_run_slow_tests , "test is slow" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(not torch.cuda.is_available() , "test requires only a CPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Any:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.is_available() , "test requires a GPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_xpu_available() , "test requires a XPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(is_mps_available() , "test requires a `mps` backend support in `torch`" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , "test requires the Hugging Face suite" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(is_bnb_available() , "test requires the bitsandbytes library" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_tpu_available() , "test requires TPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() == 1 , "test requires a GPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() == 1 , "test requires a XPU" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Optional[int]:
'''simple docstring'''
return unittest.skipUnless(torch.cuda.device_count() > 1 , "test requires multiple GPUs" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> List[str]:
'''simple docstring'''
return unittest.skipUnless(torch.xpu.device_count() > 1 , "test requires multiple XPUs" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
return unittest.skipUnless(is_safetensors_available() , "test requires safetensors" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_deepspeed_available() , "test requires DeepSpeed" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] ) -> Tuple:
'''simple docstring'''
return unittest.skipUnless(is_torch_version(">=" , "1.12.0" ) , "test requires torch version >= 1.12.0" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any]=None , _UpperCamelCase : List[str]=None ) -> int:
'''simple docstring'''
if test_case is None:
return partial(_UpperCamelCase , version=_UpperCamelCase )
return unittest.skipUnless(is_torch_version(">=" , _UpperCamelCase ) , F'test requires torch version >= {version}' )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
return unittest.skipUnless(is_tensorboard_available() , "test requires Tensorboard" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> str:
'''simple docstring'''
return unittest.skipUnless(is_wandb_available() , "test requires wandb" )(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Any:
'''simple docstring'''
return unittest.skipUnless(is_comet_ml_available() , "test requires comet_ml" )(_UpperCamelCase )
_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
return unittest.skipUnless(
_atleast_one_tracker_available , "test requires at least one tracker to be available and for `comet_ml` to not be installed" , )(_UpperCamelCase )
class UpperCAmelCase ( unittest.TestCase):
    clear_on_setup = True
    @classmethod
    def setUpClass(cls ):
        """simple docstring"""
        cls.tmpdir = tempfile.mkdtemp()
    @classmethod
    def tearDownClass(cls ):
        """simple docstring"""
        if os.path.exists(cls.tmpdir ):
            shutil.rmtree(cls.tmpdir )
    def setUp(self ):
        """simple docstring"""
        if self.clear_on_setup:
            for path in Path(self.tmpdir ).glob("**/*" ):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path )
class UpperCAmelCase ( unittest.TestCase):
    def tearDown(self ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class UpperCAmelCase ( unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]] ):
        """simple docstring"""
        self.mocks = mocks if isinstance(mocks, (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def are_the_same_tensors(tensor ) -> bool:
    '''simple docstring'''
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device )
    tensors = gather(tensor ).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0] ):
        if not torch.equal(tensors[i] , tensor ):
            return False
    return True
class _RunOutput:
    def __init__(self , returncode , stdout , stderr ):
        """simple docstring"""
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream , callback ):
    '''simple docstring'''
    while True:
        line = await stream.readline()
        if line:
            callback(line )
        else:
            break
async def _stream_subprocess(cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    '''simple docstring'''
    if echo:
        print("\nRunning: " , " ".join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []
    def tee(line , sink , pipe , label="" ):
        line = line.decode("utf-8" ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )
    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l: tee(l , out , sys.stdout , label="stdout:" ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l: tee(l , err , sys.stderr , label="stderr:" ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def execute_subprocess_async(cmd , env=None , stdin=None , timeout=1_80 , quiet=False , echo=True ) -> _RunOutput:
    '''simple docstring'''
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = " ".join(cmd )
    if result.returncode > 0:
        stderr = "\n".join(result.stderr )
        raise RuntimeError(
            F'\'{cmd_str}\' failed with returncode {result.returncode}\n\n'
            F'The combined stderr from workers follows:\n{stderr}' )
    return result
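# Illustrative usage (assumed, not in the original): execute_subprocess_async(["echo", "hello"])
# would run the command on the event loop and return a _RunOutput whose stdout list contains
# ["hello"]; a non-zero return code raises RuntimeError with the captured stderr.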
class SubprocessCallException(Exception):
    pass
def run_command(command , return_stdout=False ):
    '''simple docstring'''
    try:
        output = subprocess.check_output(command , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , "decode" ):
                output = output.decode("utf-8" )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F'Command `{" ".join(command )}` failed with the following error:\n\n{e.output.decode()}' ) from e
| 31
|
'''simple docstring'''
def validate_initial_digits(credit_card_number: str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def luhn_validation(credit_card_number: str ) -> bool:
    '''simple docstring'''
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number ) - 2
    for i in range(half_len , -1 , -2 ):
        # double the value of every second digit
        digit = int(cc_number[i] )
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit ) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number ) - 1 , -1 , -2 ):
        total += int(cc_number[i] )
    return total % 10 == 0
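# Worked example (illustrative, not in the original): for "4111111111111111", doubling every
# second digit from the right turns the leading 4 into 8 and seven of the 1s into 2s; with the
# eight untouched 1s the checksum is 8 + 7 * 2 + 8 = 30, and 30 % 10 == 0, so the check passes.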
def validate_credit_card_number(credit_card_number: str ) -> bool:
    '''simple docstring'''
    error_message = F'{credit_card_number} is an invalid credit card number because'
    if not credit_card_number.isdigit():
        print(F'{error_message} it has nonnumerical characters.' )
        return False
    if not 13 <= len(credit_card_number ) <= 16:
        print(F'{error_message} of its length.' )
        return False
    if not validate_initial_digits(credit_card_number ):
        print(F'{error_message} of its first two digits.' )
        return False
    if not luhn_validation(credit_card_number ):
        print(F'{error_message} it fails the Luhn check.' )
        return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 1
|
'''simple docstring'''
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float] , x0: float , x1: float ) -> float:
    '''simple docstring'''
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n ) == function(x_n1 ):
            raise ZeroDivisionError("float division by zero, could not find root" )
        x_n2 = x_n1 - (
            function(x_n1 ) / ((function(x_n1 ) - function(x_n )) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1 ) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float ) -> float:
    '''simple docstring'''
    return math.pow(x , 3 ) - (2 * x) - 5
if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
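    # Sanity check (illustrative, not in the original): the real root of x^3 - 2x - 5 is
    # approximately 2.0946, so the call above should print a value close to that.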
| 31
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float , hole_conc: float , intrinsic_conc: float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
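    # Illustrative usage (values assumed, not in the original): by the mass-action law
    # n * p = n_i**2, an intrinsic concentration of 1e10 with a hole concentration of 1e12
    # implies an electron concentration of 1e8.
    print(carrier_concentration(electron_conc=0, hole_conc=1e12, intrinsic_conc=1e10))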
| 31
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = ['speech']
def __init__( self : List[str], *a_ : List[str], **a_ : Optional[int] ):
"""simple docstring"""
requires_backends(self, ["speech"] )
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Tuple = ['speech']
def __init__( self : List[Any], *a_ : Tuple, **a_ : List[str] ):
"""simple docstring"""
requires_backends(self, ["speech"] )
| 31
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
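    # Illustrative usage (values assumed, not in the original): water at roughly 20 °C has a
    # density of about 998 kg/m^3 and a bulk modulus of about 2.15e9 Pa, giving ~1468 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))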
| 31
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v , l , r , key ) -> int:  # noqa: E741
    '''simple docstring'''
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int] ) -> int:
    '''simple docstring'''
    if len(v ) == 0:
        return 0
    tail = [0] * len(v )
    length = 1
    tail[0] = v[0]
    for i in range(1 , len(v ) ):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the longest subsequence found so far
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling of v[i] among current subsequence ends
            tail[ceil_index(tail , -1 , length - 1 , v[i] )] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
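    # Illustrative check (not in the original): the longest strictly increasing subsequence of
    # [2, 5, 3, 7, 11, 8, 10, 13, 6] is [2, 3, 7, 8, 10, 13], so this prints 6.
    print(longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]))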
| 31
| 1
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: int = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
__lowercase: Optional[int] = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
__lowercase: Optional[Any] = {
"facebook/bart-base": 1_024,
"facebook/bart-large": 1_024,
"facebook/bart-large-mnli": 1_024,
"facebook/bart-large-cnn": 1_024,
"facebook/bart-large-xsum": 1_024,
"yjernite/bart_eli5": 1_024,
}
@lru_cache()
def bytes_to_unicode():
    '''simple docstring'''
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
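# Illustrative note (added, not in the original): bytes_to_unicode() maps every byte 0-255 to a
# printable unicode character, e.g. the space byte 32 maps to "Ġ" (chr(256 + 32)), so the BPE
# never has to operate on raw whitespace or control bytes.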
def get_pairs(word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
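# Illustrative note (added, not in the original): get_pairs(("h", "e", "l", "l", "o")) returns
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}, i.e. the candidate symbol pairs for the
# next BPE merge.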
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : List[str] = ['input_ids', 'attention_mask']
def __init__( self : Optional[int], a_ : int, a_ : List[str], a_ : Optional[int]="replace", a_ : int="<s>", a_ : List[Any]="</s>", a_ : List[Any]="</s>", a_ : Tuple="<s>", a_ : Tuple="<unk>", a_ : Any="<pad>", a_ : Optional[int]="<mask>", a_ : Any=False, **a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else bos_token
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else eos_token
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else sep_token
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else cls_token
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else unk_token
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase__ = AddedToken(a_, lstrip=a_, rstrip=a_ ) if isinstance(a_, a_ ) else mask_token
super().__init__(
errors=a_, bos_token=a_, eos_token=a_, unk_token=a_, sep_token=a_, cls_token=a_, pad_token=a_, mask_token=a_, add_prefix_space=a_, **a_, )
with open(a_, encoding="utf-8" ) as vocab_handle:
UpperCamelCase__ = json.load(a_ )
UpperCamelCase__ = {v: k for k, v in self.encoder.items()}
UpperCamelCase__ = errors # how to handle errors in decoding
UpperCamelCase__ = bytes_to_unicode()
UpperCamelCase__ = {v: k for k, v in self.byte_encoder.items()}
with open(a_, encoding="utf-8" ) as merges_handle:
UpperCamelCase__ = merges_handle.read().split("\n" )[1:-1]
UpperCamelCase__ = [tuple(merge.split() ) for merge in bpe_merges]
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = {}
UpperCamelCase__ = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCamelCase__ = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
def lowercase_ ( self : int ):
"""simple docstring"""
return len(self.encoder )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
return dict(self.encoder, **self.added_tokens_encoder )
    def bpe(self, token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair : self.bpe_ranks.get(pair, float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first, i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
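    # Illustrative walkthrough (added, not in the original): with the merge table [("h", "e"),
    # ("he", "l")], bpe("hell") first merges to ("he", "l", "l"), then to ("hel", "l"), finds no
    # further ranked pair, and returns "hel l".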
def lowercase_ ( self : Any, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = []
for token in re.findall(self.pat, a_ ):
UpperCamelCase__ = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(a_ ).split(" " ) )
return bpe_tokens
def lowercase_ ( self : Any, a_ : Optional[int] ):
"""simple docstring"""
return self.encoder.get(a_, self.encoder.get(self.unk_token ) )
def lowercase_ ( self : Optional[int], a_ : str ):
"""simple docstring"""
return self.decoder.get(a_ )
    def convert_tokens_to_string(self, tokens ):
        """simple docstring"""
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8", errors=self.errors )
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file, "w", encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file, "w", encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
def lowercase_ ( self : Optional[Any], a_ : List[int], a_ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
UpperCamelCase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self : str, a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
if token_ids_a is None:
return [1] + ([0] * len(a_ )) + [1]
return [1] + ([0] * len(a_ )) + [1, 1] + ([0] * len(a_ )) + [1]
def lowercase_ ( self : Tuple, a_ : List[int], a_ : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs ):
        """simple docstring"""
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 31
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 1
|
'''simple docstring'''
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
__lowercase: Tuple = logging.getLogger(__name__)
if __name__ == "__main__":
__lowercase: Optional[Any] = argparse.ArgumentParser(
description="Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"
)
parser.add_argument(
"--data_file", type=str, default="data/dump.bert-base-uncased.pickle", help="The binarized dataset."
)
parser.add_argument(
"--token_counts_dump", type=str, default="data/token_counts.bert-base-uncased.pickle", help="The dump file."
)
parser.add_argument("--vocab_size", default=30_522, type=int)
    args = parser.parse_args()
    logger.info(F"""Loading data from {args.data_file}""")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)
    logger.info("Counting occurrences for MLM.")
    counter = Counter()
    for tk_ids in data:
        counter.update(tk_ids)
    counts = [0] * args.vocab_size
    for k, v in counter.items():
        counts[k] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, "wb") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
    def __init__(self , vocab_size=3_2000 , hidden_size=2560 , num_hidden_layers=32 , num_attention_heads=32 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.00 , rotary_emb_base=1_0000 , max_position_embeddings=2048 , initializer_range=0.02 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1996 , eos_token_id=3_1999 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
| 31
| 1
|
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = logging.get_logger()
# the current default level is logging.WARNING
UpperCamelCase__ = logging.get_verbosity()
logging.set_verbosity_error()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
logging.set_verbosity_warning()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
logging.set_verbosity_info()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
logging.set_verbosity_debug()
self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity() )
# restore to the original level
logging.set_verbosity(a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = logging.get_verbosity()
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = "Testing 1, 2, 3"
# should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
if level_origin <= logging.WARNING:
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out, msg + "\n" )
# this is setting the level for all of `transformers.*` loggers
logging.set_verbosity_error()
# should not be able to log warnings
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out, "" )
# should be able to log warnings again
logging.set_verbosity_warning()
with CaptureLogger(a_ ) as cl:
logger.warning(a_ )
self.assertEqual(cl.out, msg + "\n" )
# restore to the original level
logging.set_verbosity(a_ )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
def lowercase_ ( self : str ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
# this action activates the env var
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = os.getenv("TRANSFORMERS_VERBOSITY", a_ )
UpperCamelCase__ = logging.log_levels[env_level_str]
UpperCamelCase__ = logging.get_verbosity()
self.assertEqual(
a_, a_, f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}', )
# restore to the original level
UpperCamelCase__ = ""
transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ = logging.logging.getLogger()
with CaptureLogger(a_ ) as cl:
# this action activates the env var
logging.get_logger("transformers.models.bart.tokenization_bart" )
self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out )
# no need to restore as nothing was changed
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
transformers.utils.logging._reset_library_root_logger()
UpperCamelCase__ = logging.get_logger("transformers.models.bart.tokenization_bart" )
UpperCamelCase__ = "Testing 1, 2, 3"
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
# nothing should be logged as env var disables this method
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out, "" )
with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
# should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
with CaptureLogger(a_ ) as cl:
logger.warning_advice(a_ )
self.assertEqual(cl.out, msg + "\n" )
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
'''simple docstring'''
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 31
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float , bulk_modulus: float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Union[str, Any] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: List[str] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str , power: int | float | str ) -> list[str]:
    '''simple docstring'''
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series: list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F'1 / {pow(temp + 1 , int(power ) )}' if series else "1" )
    return series
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    nth_term = int(input("Enter the last number (nth term) of the P-Series"))
    power = int(input("Enter the power for P-Series"))
    print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
    print(p_series(nth_term, power))
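    # Example (illustrative, not in the original): p_series(5, 2) returns
    # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].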
| 31
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    '''simple docstring'''
    parser = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , "func" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 31
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
    input_schema: ClassVar[Features] = Features({'audio': Audio()})
    label_schema: ClassVar[Features] = Features({'labels': ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self ):
        """simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 31
| 1
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: List[Any] = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)
    def _forward_hook(self, m: nn.Module, inputs: Tensor, outputs: Tensor ):
        """simple docstring"""
        has_not_submodules = len(list(m.modules() ) ) == 1 or isinstance(m, nn.Conv2d ) or isinstance(m, nn.BatchNorm2d )
        if has_not_submodules:
            self.traced.append(m )
def __call__( self : Union[str, Any], a_ : Tensor ):
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(a_ )
[x.remove() for x in self.handles]
return self
@property
    def parametrized(self ):
        """simple docstring"""
        return list(filter(lambda x : len(list(x.state_dict().keys() ) ) > 0, self.traced ) )
@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)
    def __call__(self, x: Tensor ):
        """simple docstring"""
        dest_traced = Tracker(self.dest )(x ).parametrized
        src_traced = Tracker(self.src )(x ).parametrized
        src_traced = list(filter(lambda m : type(m ) not in self.src_skip, src_traced ) )
        dest_traced = list(filter(lambda m : type(m ) not in self.dest_skip, dest_traced ) )
        if len(dest_traced ) != len(src_traced ):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced )} operations while'
                f' destination module has {len(dest_traced )}.' )
        for dest_m, src_m in zip(dest_traced, src_traced ):
            dest_m.load_state_dict(src_m.state_dict() )
            if self.verbose == 1:
                print(f'Transferred from={src_m} to={dest_m}' )
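# Note (added for clarity, not in the original): ModuleTransfer works by tracing both modules on
# the same dummy input and copying state_dicts pairwise, so source and destination must execute
# the same sequence of parameterized leaf layers in the same order.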
def convert_weight_and_push(name: str , config: ResNetConfig , save_directory: Path , push_to_hub: bool = True ):
    '''simple docstring'''
    print(F'Converting {name}...' )
    with torch.no_grad():
        from_model = timm.create_model(name , pretrained=True ).eval()
        our_model = ResNetForImageClassification(config ).eval()
        module_transfer = ModuleTransfer(src=from_model , dest=our_model )
        x = torch.randn((1, 3, 224, 224) )
        module_transfer(x )
    assert torch.allclose(from_model(x ) , our_model(x ).logits ), "The model logits don't match the original one."
    checkpoint_name = F'resnet{"-".join(name.split("resnet" ) )}'
    print(checkpoint_name )
    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add model" , use_temp_dir=True , )
        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" )
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name , commit_message="Add image processor" , use_temp_dir=True , )
        print(F'Pushed {checkpoint_name}' )
def convert_weights_and_push(save_directory: Path , model_name: str = None , push_to_hub: bool = True ):
    '''simple docstring'''
    filename = "imagenet-1k-id2label.json"
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(ResNetConfig , num_labels=num_labels , id2label=id2label , label2id=label2id )
    names_to_config = {
"resnet18": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet26": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet34": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 1_28, 2_56, 5_12] , layer_type="basic" ),
"resnet50": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet101": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
"resnet152": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[2_56, 5_12, 10_24, 20_48] , layer_type="bottleneck" ),
}
if model_name:
convert_weight_and_push(_UpperCamelCase , names_to_config[model_name] , _UpperCamelCase , _UpperCamelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
return config, expected_shape
if __name__ == "__main__":
__lowercase: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
__lowercase: Any = parser.parse_args()
__lowercase: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 31
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : bool = False ) -> str:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = F'Expected string as input, found {type(_UpperCamelCase )}'
raise ValueError(_UpperCamelCase )
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = F'Expected boolean as use_pascal parameter, found {type(_UpperCamelCase )}'
raise ValueError(_UpperCamelCase )
UpperCamelCase__ = input_str.split("_" )
UpperCamelCase__ = 0 if use_pascal else 1
UpperCamelCase__ = words[start_index:]
UpperCamelCase__ = [word[0].upper() + word[1:] for word in words_to_capitalize]
UpperCamelCase__ = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
            # If covariance_sum has already been initialised (not the first class)
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
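# Added note: the helper above averages the per-class scatter, i.e. it returns
# (1 / N) * sum_i X_i_centered @ X_i_centered.T, where each class block X_i holds
# one sample per column and N is the total number of samples.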
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
            # If covariance_sum has already been initialised (not the first class)
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
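# Added usage sketch (hedged, hypothetical helper): the same eigendecomposition
# route as principal_component_analysis above, on a tiny dataset laid out with
# one sample per column.
def _demo_pca_projection() -> None:
    rng = np.random.default_rng(0)
    features = rng.normal(size=(2, 50))               # 2 features x 50 samples
    centered = features - features.mean(1, keepdims=True)
    covariance = centered @ centered.T / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance)      # eigenvalues ascending
    top_axis = eigenvectors[:, ::-1][:, 0:1]          # dominant direction first
    projected = top_axis.T @ features                 # shape (1, 50)
    assert projected.shape == (1, 50)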
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
pass
class UpperCAmelCase :
def __init__( self : Optional[int], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = data
UpperCamelCase__ = None
def __iter__( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self
UpperCamelCase__ = []
while node:
if node in visited:
raise ContainsLoopError
visited.append(a_ )
yield node.data
UpperCamelCase__ = node.next_node
@property
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
try:
list(self )
return False
except ContainsLoopError:
return True
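# Added note: `has_loop` above tracks visited nodes in a list, so each step
# pays a linear membership test (O(n^2) time and O(n) extra space overall);
# Floyd's tortoise-and-hare two-pointer walk detects the same loops in
# O(n) time and O(1) space.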
if __name__ == "__main__":
__lowercase: List[Any] = Node(1)
__lowercase: List[Any] = Node(2)
__lowercase: Any = Node(3)
__lowercase: Optional[int] = Node(4)
print(root_node.has_loop) # False
__lowercase: Dict = root_node.next_node
print(root_node.has_loop) # True
__lowercase: List[str] = Node(5)
__lowercase: str = Node(6)
__lowercase: Tuple = Node(5)
__lowercase: Tuple = Node(6)
print(root_node.has_loop) # False
__lowercase: Any = Node(1)
print(root_node.has_loop) # False
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( numa : int , numb : int ) -> bool:
    '''simple docstring'''
    return numa ^ numb < 0
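# Added worked examples (illustrative): in two's complement the sign occupies
# the most significant bit, so numa ^ numb is negative exactly when the signs
# differ, e.g. 1 ^ -1 == -2 (< 0, different signs) while -2 ^ -3 == 3 and
# 1 ^ 1 == 0 (>= 0, same signs).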
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 1
|
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = (KDPMaDiscreteScheduler,)
_lowerCamelCase : Tuple = 10
def lowercase_ ( self : str, **a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {
"num_train_timesteps": 1100,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**a_ )
return config
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001], [0.0_002, 0.002, 0.02] ):
self.check_over_configs(beta_start=a_, beta_end=a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(prediction_type="v_prediction" )
UpperCamelCase__ = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = scheduler.scale_model_input(a_, a_ )
UpperCamelCase__ = model(a_, a_ )
UpperCamelCase__ = scheduler.step(a_, a_, a_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7 ) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7 ) < 1e-2
assert abs(result_mean.item() - 0.0_002 ) < 1e-3
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
UpperCamelCase__ = sample.to(a_ )
for i, t in enumerate(scheduler.timesteps ):
UpperCamelCase__ = scheduler.scale_model_input(a_, a_ )
UpperCamelCase__ = model(a_, a_ )
UpperCamelCase__ = scheduler.step(a_, a_, a_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
def lowercase_ ( self : Any ):
"""simple docstring"""
if torch_device == "mps":
return
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
scheduler.set_timesteps(self.num_inference_steps, device=a_ )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter.to(a_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
UpperCamelCase__ = scheduler.scale_model_input(a_, a_ )
UpperCamelCase__ = model(a_, a_ )
UpperCamelCase__ = scheduler.step(a_, a_, a_ )
UpperCamelCase__ = output.prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
if str(a_ ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4_125 ) < 1e-2
assert abs(result_mean.item() - 0.0_266 ) < 1e-3
| 31
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
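# Added usage sketch (hedged): a 3-second dummy waveform cropped to at most
# 1 second at 16 kHz by the helper above; clips already short enough are
# returned unchanged.
#   wav = np.zeros(3 * 16_000)
#   clip = random_subsample(wav, max_length=1.0)   # len(clip) == 16_000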
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
                'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = BioGptTokenizer
_lowerCamelCase : Any = False
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
UpperCamelCase__ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w" ) as fp:
fp.write(json.dumps(a_ ) )
with open(self.merges_file, "w" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : List[Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = BioGptTokenizer(self.vocab_file, self.merges_file )
UpperCamelCase__ = "lower"
UpperCamelCase__ = ["low", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + ["<unk>"]
UpperCamelCase__ = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@slow
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
UpperCamelCase__ = tokenizer.encode("sequence builders", add_special_tokens=a_ )
UpperCamelCase__ = tokenizer.encode("multi-sequence build", add_special_tokens=a_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_ )
UpperCamelCase__ = tokenizer.build_inputs_with_special_tokens(a_, a_ )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 31
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
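# Added note (hedged, qualitative): the loop above emits the shortest phrase
# already present in the lexicon and appends a fresh entry per match; in the
# reference LZ implementation this file follows, the power-of-two rebuild
# prefixes every existing key with "0" so phrase codes keep a fixed bit width
# as the dictionary doubles.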
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:  # write every byte, including the padded final one
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
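# --- Illustrative sketch (hedged): the trailing "1 + zeros" pad used above ---
# write_file_binary pads the last byte with a single "1" followed by "0"s (or
# appends a whole "10000000" byte when already aligned), so a reader can
# recover the payload by stripping everything from the last "1" onwards --
# which is why the padded final byte must also be written. Hypothetical
# helpers, not part of the module:
def pad_bits(bits: str, byte_length: int = 8) -> str:
    if len(bits) % byte_length == 0:
        return bits + "1" + "0" * (byte_length - 1)
    return bits + "1" + "0" * (byte_length - len(bits) % byte_length - 1)
def unpad_bits(padded: str) -> str:
    return padded[: padded.rindex("1")]
for payload in ("101", "10110111", ""):
    padded = pad_bits(payload)
    assert len(padded) % 8 == 0 and unpad_bits(padded) == payload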
| 31
| 1
|
'''simple docstring'''
from typing import Any
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list , _UpperCamelCase : dict , _UpperCamelCase : dict , _UpperCamelCase : dict , ) -> list:
'''simple docstring'''
_validation(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
# Creates data structures and fill initial step
UpperCamelCase__ = {}
UpperCamelCase__ = {}
for state in states_space:
UpperCamelCase__ = observations_space[0]
UpperCamelCase__ = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
UpperCamelCase__ = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(_UpperCamelCase ) ):
UpperCamelCase__ = observations_space[o]
UpperCamelCase__ = observations_space[o - 1]
for state in states_space:
# Calculates the argmax for probability function
UpperCamelCase__ = ""
UpperCamelCase__ = -1
for k_state in states_space:
UpperCamelCase__ = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
UpperCamelCase__ = probability
UpperCamelCase__ = k_state
# Update probabilities and pointers dicts
UpperCamelCase__ = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
UpperCamelCase__ = arg_max
# The final observation
UpperCamelCase__ = observations_space[len(_UpperCamelCase ) - 1]
# argmax for given final observation
UpperCamelCase__ = ""
UpperCamelCase__ = -1
for k_state in states_space:
UpperCamelCase__ = probabilities[(k_state, final_observation)]
if probability > max_probability:
UpperCamelCase__ = probability
UpperCamelCase__ = k_state
UpperCamelCase__ = arg_max
# Process pointers backwards
UpperCamelCase__ = last_state
UpperCamelCase__ = []
for o in range(len(_UpperCamelCase ) - 1 , -1 , -1 ):
result.append(_UpperCamelCase )
UpperCamelCase__ = pointers[previous, observations_space[o]]
result.reverse()
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , ) -> None:
'''simple docstring'''
_validate_not_empty(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
_validate_lists(_UpperCamelCase , _UpperCamelCase )
_validate_dicts(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> None:
'''simple docstring'''
_validate_list(_UpperCamelCase , "observations_space" )
_validate_list(_UpperCamelCase , "states_space" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : str ) -> None:
'''simple docstring'''
if not isinstance(_object , _UpperCamelCase ):
UpperCamelCase__ = F'{var_name} must be a list'
raise ValueError(_UpperCamelCase )
else:
for x in _object:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = F'{var_name} must be a list of strings'
raise ValueError(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Any , ) -> None:
'''simple docstring'''
_validate_dict(_UpperCamelCase , "initial_probabilities" , _UpperCamelCase )
_validate_nested_dict(_UpperCamelCase , "transition_probabilities" )
_validate_nested_dict(_UpperCamelCase , "emission_probabilities" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : str ) -> None:
'''simple docstring'''
_validate_dict(_object , _UpperCamelCase , _UpperCamelCase )
for x in _object.values():
_validate_dict(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : str , _UpperCamelCase : type , _UpperCamelCase : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , _UpperCamelCase ):
UpperCamelCase__ = F'{var_name} must be a dict'
raise ValueError(_UpperCamelCase )
if not all(isinstance(_UpperCamelCase , _UpperCamelCase ) for x in _object ):
UpperCamelCase__ = F'{var_name} all keys must be strings'
raise ValueError(_UpperCamelCase )
if not all(isinstance(_UpperCamelCase , _UpperCamelCase ) for x in _object.values() ):
UpperCamelCase__ = "nested dictionary " if nested else ""
UpperCamelCase__ = F'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(_UpperCamelCase )
if __name__ == "__main__":
from doctest import testmod
testmod()
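# --- Usage sketch (hedged): the classic healthy/fever toy HMM ---
# Every def above shares one obfuscated name, so this is shown as data plus
# the call the upstream `viterbi` entry point would take; the expected decoded
# path for these observations is ["Healthy", "Healthy", "Fever"].
observations = ["normal", "cold", "dizzy"]
states = ["Healthy", "Fever"]
start_p = {"Healthy": 0.6, "Fever": 0.4}
trans_p = {
    "Healthy": {"Healthy": 0.7, "Fever": 0.3},
    "Fever": {"Healthy": 0.4, "Fever": 0.6},
}
emit_p = {
    "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
    "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
}
# viterbi(observations, states, start_p, trans_p, emit_p)
# -> ["Healthy", "Healthy", "Fever"]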
| 31
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__lowercase: Optional[Any] = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = ['pixel_values']
def __init__( self : Union[str, Any], a_ : bool = True, a_ : Dict[str, int] = None, a_ : float = None, a_ : PILImageResampling = PILImageResampling.BILINEAR, a_ : bool = True, a_ : Union[int, float] = 1 / 255, a_ : bool = True, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, **a_ : List[str], ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = size if size is not None else {"shortest_edge": 384}
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
# Default value set here for backwards compatibility where the value in config is None
UpperCamelCase__ = crop_pct if crop_pct is not None else 224 / 256
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : Optional[Any], a_ : np.ndarray, a_ : Dict[str, int], a_ : float, a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Any, ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
if "shortest_edge" not in size:
raise ValueError(f'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
UpperCamelCase__ = size["shortest_edge"]
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
UpperCamelCase__ = int(shortest_edge / crop_pct )
UpperCamelCase__ = get_resize_output_image_size(a_, size=a_, default_to_square=a_ )
UpperCamelCase__ = resize(image=a_, size=a_, resample=a_, data_format=a_, **a_ )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=a_, size=(shortest_edge, shortest_edge), data_format=a_, **a_ )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
a_, size=(shortest_edge, shortest_edge), resample=a_, data_format=a_, **a_ )
def lowercase_ ( self : Union[str, Any], a_ : np.ndarray, a_ : Union[int, float], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Tuple, ):
"""simple docstring"""
return rescale(a_, scale=a_, data_format=a_, **a_ )
def lowercase_ ( self : str, a_ : np.ndarray, a_ : Union[float, List[float]], a_ : Union[float, List[float]], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : List[str], ):
"""simple docstring"""
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def lowercase_ ( self : str, a_ : ImageInput, a_ : bool = None, a_ : Dict[str, int] = None, a_ : float = None, a_ : PILImageResampling = None, a_ : bool = None, a_ : float = None, a_ : bool = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[str, TensorType]] = None, a_ : ChannelDimension = ChannelDimension.FIRST, **a_ : Optional[Any], ):
"""simple docstring"""
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = crop_pct if crop_pct is not None else self.crop_pct
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a_, default_to_square=a_ )
UpperCamelCase__ = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError("crop_pct must be specified if size < 384." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a_, size=a_, crop_pct=a_, resample=a_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a_, a_ ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a_, tensor_type=a_ )
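# --- Arithmetic sketch (hedged, assumed numbers) ---
# For size {"shortest_edge": 224} with the default crop_pct of 224 / 256, the
# resize step above scales the short side to int(224 / (224 / 256)) == 256 and
# then center-crops to 224 x 224; at shortest_edge >= 384 the image is instead
# warped straight to (shortest_edge, shortest_edge).
assert int(224 / (224 / 256)) == 256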
| 31
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 1
|
'''simple docstring'''
from importlib import import_module
from .logging import get_logger
__lowercase: int = get_logger(__name__)
class UpperCAmelCase :
def __init__( self : List[Any], a_ : int, a_ : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase__ = attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self, a_, getattr(a_, a_ ) )
UpperCamelCase__ = module._original_module if isinstance(a_, _PatchedModuleObj ) else module
class UpperCAmelCase :
_lowerCamelCase : Optional[int] = []
def __init__( self : Tuple, a_ : Dict, a_ : str, a_ : Optional[int], a_ : List[Any]=None ):
"""simple docstring"""
UpperCamelCase__ = obj
UpperCamelCase__ = target
UpperCamelCase__ = new
UpperCamelCase__ = target.split("." )[0]
UpperCamelCase__ = {}
UpperCamelCase__ = attrs or []
def __enter__( self : Any ):
"""simple docstring"""
*UpperCamelCase__ , UpperCamelCase__ = self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(a_ ) ):
try:
UpperCamelCase__ = import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
UpperCamelCase__ = getattr(self.obj, a_ )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(a_, _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
UpperCamelCase__ = obj_attr
# patch at top level
setattr(self.obj, a_, _PatchedModuleObj(a_, attrs=self.attrs ) )
UpperCamelCase__ = getattr(self.obj, a_ )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(a_, a_, _PatchedModuleObj(getattr(a_, a_, a_ ), attrs=self.attrs ) )
UpperCamelCase__ = getattr(a_, a_ )
# finally set the target attribute
setattr(a_, a_, self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
UpperCamelCase__ = getattr(import_module(".".join(a_ ) ), a_ )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj, a_ ) is attr_value:
UpperCamelCase__ = getattr(self.obj, a_ )
setattr(self.obj, a_, self.new )
elif target_attr in globals()["__builtins__"]: # if it's a builtin like "open"
UpperCamelCase__ = globals()["__builtins__"][target_attr]
setattr(self.obj, a_, self.new )
else:
raise RuntimeError(f'Tried to patch attribute {target_attr} instead of a submodule.' )
def __exit__( self : Any, *a_ : Tuple ):
"""simple docstring"""
for attr in list(self.original ):
setattr(self.obj, a_, self.original.pop(a_ ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
self.__enter__()
self._active_patches.append(self )
def lowercase_ ( self : Any ):
"""simple docstring"""
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
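# --- Usage sketch (hedged, hypothetical target) ---
# Upstream this context manager is known as patch_submodule; a typical use
# temporarily swaps os.path.join as seen from a loaded module object:
#
#     with patch_submodule(some_module, "os.path.join", mocked_join):
#         ...  # some_module resolves os.path.join to mocked_join
#     # the original attribute chain is restored on __exit__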
| 31
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = "ZinengTang/tvlt-base"
UpperCamelCase__ = tempfile.mkdtemp()
def lowercase_ ( self : List[str], **a_ : str ):
"""simple docstring"""
return TvltImageProcessor.from_pretrained(self.checkpoint, **a_ )
def lowercase_ ( self : Tuple, **a_ : Optional[int] ):
"""simple docstring"""
return TvltFeatureExtractor.from_pretrained(self.checkpoint, **a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=a_, feature_extractor=a_ )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor, a_ )
self.assertIsInstance(processor.image_processor, a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=a_, feature_extractor=a_ )
UpperCamelCase__ = np.ones([1_2000] )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" )
UpperCamelCase__ = processor(audio=a_, return_tensors="np" )
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=a_, feature_extractor=a_ )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = image_processor(a_, return_tensors="np" )
UpperCamelCase__ = processor(images=a_, return_tensors="np" )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=a_, feature_extractor=a_ )
UpperCamelCase__ = np.ones([1_2000] )
UpperCamelCase__ = np.ones([3, 224, 224] )
UpperCamelCase__ = processor(audio=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_feature_extractor()
UpperCamelCase__ = TvltProcessor(image_processor=a_, feature_extractor=a_ )
self.assertListEqual(
processor.model_input_names, image_processor.model_input_names + feature_extractor.model_input_names, msg="`processor` and `image_processor`+`feature_extractor` model input names do not match", )
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
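# --- Equivalent computation (hedged sketch) using a heap ---
# The repeated list scan above is O(n^2); a binary heap yields the same optimal
# merge cost in O(n log n). For [2, 3, 4]: merge 2+3 -> 5, then 5+4 -> 9,
# total cost 14.
import heapq
def merge_cost_via_heap(sizes):
    heap = list(sizes)
    heapq.heapify(heap)
    total = 0
    while len(heap) > 1:
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        total += merged
        heapq.heappush(heap, merged)
    return total
assert merge_cost_via_heap([2, 3, 4]) == 14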
| 31
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
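# --- Usage note (hedged, upstream public names) ---
# Unobfuscated, the two classes above are YolosConfig and YolosOnnxConfig, e.g.:
#
#     from transformers import YolosConfig
#     config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)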
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
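# --- Cross-check (hedged sketch) against the standard library ---
# The state-space tree above prints each ordering exactly once; itertools
# agrees that a 4-element sequence has 4! = 24 permutations.
from itertools import permutations
assert len(list(permutations([3, 1, 2, 4]))) == 24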
| 31
| 1
|
'''simple docstring'''
from math import isqrt
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , _UpperCamelCase , _UpperCamelCase ):
UpperCamelCase__ = False
return [i for i in range(2 , _UpperCamelCase ) if is_prime[i]]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10**8 ) -> int:
'''simple docstring'''
UpperCamelCase__ = calculate_prime_numbers(max_number // 2 )
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 1
while left <= right:
while prime_numbers[left] * prime_numbers[right] >= max_number:
right -= 1
semiprimes_count += right - left + 1
left += 1
return semiprimes_count
if __name__ == "__main__":
print(F"""{solution() = }""")
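# --- Brute-force cross-check (hedged sketch, assumed helper) ---
# A semiprime has exactly two prime factors counted with multiplicity; below 30
# these are 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten numbers.
def count_semiprimes_brute_force(limit):
    def num_prime_factors(n):
        count, d = 0, 2
        while d * d <= n:
            while n % d == 0:
                n //= d
                count += 1
            d += 1
        return count + (1 if n > 1 else 0)
    return sum(1 for n in range(4, limit) if num_prime_factors(n) == 2)
assert count_semiprimes_brute_force(30) == 10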
| 31
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
__lowercase: str = [8, 5, 9, 7]
__lowercase: Optional[int] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
__lowercase: Dict = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCAmelCase :
def __init__( self : List[Any], a_ : list[int], a_ : list[list[int]], a_ : list[list[int]], ):
"""simple docstring"""
UpperCamelCase__ = claim_vector
UpperCamelCase__ = allocated_resources_table
UpperCamelCase__ = maximum_claim_table
def lowercase_ ( self : Any ):
"""simple docstring"""
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def lowercase_ ( self : Dict ):
"""simple docstring"""
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(a_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return {self.__need().index(a_ ): i for i in self.__need()}
def lowercase_ ( self : List[str], **a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.__need()
UpperCamelCase__ = self.__allocated_resources_table
UpperCamelCase__ = self.__available_resources()
UpperCamelCase__ = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print("_" * 50 + "\n" )
while need_list:
UpperCamelCase__ = False
for each_need in need_list:
UpperCamelCase__ = True
for index, need in enumerate(a_ ):
if need > available_resources[index]:
UpperCamelCase__ = False
break
if execution:
UpperCamelCase__ = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
UpperCamelCase__ = original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(a_ )
# update available/freed resources stack
UpperCamelCase__ = np.array(a_ ) + np.array(
alloc_resources_table[process_number] )
print(
"Updated available resource stack for processes: "
+ " ".join([str(a_ ) for x in available_resources] ) )
break
if safe:
print("The process is in a safe state.\n" )
else:
print("System in unsafe state. Aborting...\n" )
break
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
print(" " * 9 + "Allocated Resource Table" )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(a_ ) + 1}'
+ " ".join(f'{it:>8}' for it in item )
+ "\n" )
print(" " * 9 + "System Resource Table" )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(a_ ) + 1}'
+ " ".join(f'{it:>8}' for it in item )
+ "\n" )
print(
"Current Usage by Active Processes: "
+ " ".join(str(a_ ) for x in self.__claim_vector ) )
print(
"Initial Available Resources: "
+ " ".join(str(a_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
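# --- Standalone safety check (hedged sketch, assumed helper) ---
# A compact version of the loop in the class above: repeatedly run any process
# whose remaining need fits in the free pool, then reclaim its allocation.
def is_safe_state(available, allocated, maximum):
    need = [[m - a for m, a in zip(mx, al)] for mx, al in zip(maximum, allocated)]
    work = list(available)
    pending = list(range(len(allocated)))
    while pending:
        runnable = [p for p in pending if all(n <= w for n, w in zip(need[p], work))]
        if not runnable:
            return False  # no process can finish: unsafe state
        p = runnable[0]
        work = [w + a for w, a in zip(work, allocated[p])]
        pending.remove(p)
    return True
# Two processes, one resource type, 3 units free:
assert is_safe_state([3], [[1], [2]], [[4], [3]])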
| 31
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda _UpperCamelCase : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
UpperCamelCase__ = jax.tree_util.tree_map(
lambda _UpperCamelCase : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
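# --- Shape sketch (hedged, toy shapes) for the kernel transposes above ---
import numpy as np
conv_pt = np.zeros((8, 3, 5, 5))            # PyTorch conv weight: (out, in, kh, kw)
conv_flax = conv_pt.transpose(2, 3, 1, 0)   # Flax conv kernel:    (kh, kw, in, out)
assert conv_flax.shape == (5, 5, 3, 8)
linear_pt = np.zeros((16, 32))              # PyTorch linear weight: (out, in)
assert linear_pt.T.shape == (32, 16)        # Flax dense kernel:     (in, out)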
| 31
| 1
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
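# --- Usage note (hedged, upstream public names) ---
# Upstream this template is AudioClassification; aligning it with a dataset's
# features swaps the bare ClassLabel schema for the dataset's own label type:
#
#     task = AudioClassification(audio_column="audio", label_column="labels")
#     task = task.align_with_features(dataset.features)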
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
# If doubling of a number results in a two digit number
# i.e. greater than 9 (e.g., 6 × 2 = 12),
# then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
# to get a single digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase ( unittest.TestCase):
@property
def lowercase_ ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase__ = UNetaDModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
return model
    def test_inference(self):
        """Two-step inference should reproduce a known output slice."""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        """Full-size checkpoint inference should reproduce a known output slice."""
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 31
|
'''simple docstring'''
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
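# Worked example (illustrative): exactly one of the three values must be 0;
# with intrinsic_conc unknown, carrier_concentration(25, 100, 0)
# returns ('intrinsic_conc', 50.0) since (25 * 100) ** 0.5 == 50.0.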
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
import inspect
import re
from hashlib import sha256
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Hash a list of source lines, ignoring comments and empty lines."""
    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
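# Illustrative example: comment-only and blank lines do not change the hash.
#   _hash_python_lines(["x = 1", "# note", "", "print(x)"])
#   == sha256("x = 1\nprint(x)".encode("utf-8")).hexdigest()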
# get importable module names and hash for caching
_PACKAGED_DATASETS_MODULES = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_EXTENSION_TO_MODULE = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
__lowercase: List[Any] = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
_MODULE_TO_EXTENSIONS: Dict[str, List[str]] = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 31
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 1
|
'''simple docstring'''
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Return the Mobius function value mu(n)."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
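# Worked examples (illustrative): mobius(7) == -1 (one prime factor, square
# free), mobius(6) == 1 (two prime factors), mobius(4) == 0 (not square free).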
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search the smallest index in v[l..r] with v[index] >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
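# Example (illustrative):
#   longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6]) == 6
# one such subsequence is [2, 3, 7, 8, 10, 13].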
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
"n_samples": 64,
"horizon": 32,
"num_inference_steps": 20,
"n_guide_steps": 2, # can set to 0 for faster sampling, does not use value network
"scale_grad_by_std": True,
"scale": 0.1,
"eta": 0.0,
"t_grad_cutoff": 2,
"device": "cpu",
}
if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)
    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )
    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1_000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)
            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            obs = next_observation
    except KeyboardInterrupt:
        pass
print(F"""Total reward: {total_reward}""")
| 31
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class LMSDiscreteScheduler(metaclass=DummyObject):
    _backends = ["torch", "scipy"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "scipy"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "scipy"])
| 31
| 1
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(a_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(a_ )
UpperCamelCase__ = model.generate(a_, max_new_tokens=10, do_sample=a_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(a_ )
model.generate(a_, max_new_tokens=10, do_sample=a_, streamer=a_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(a_, a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(a_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(a_ )
UpperCamelCase__ = model.generate(a_, max_new_tokens=10, do_sample=a_ )
UpperCamelCase__ = tokenizer.decode(greedy_ids[0] )
UpperCamelCase__ = TextIteratorStreamer(a_ )
UpperCamelCase__ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
UpperCamelCase__ = Thread(target=model.generate, kwargs=a_ )
thread.start()
UpperCamelCase__ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(a_, a_ )
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(a_ )
UpperCamelCase__ = -1
UpperCamelCase__ = ids_tensor((1, 5), vocab_size=model.config.vocab_size ).to(a_ )
UpperCamelCase__ = model.generate(a_, max_new_tokens=10, do_sample=a_ )
UpperCamelCase__ = greedy_ids[:, input_ids.shape[1] :]
UpperCamelCase__ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(a_, skip_prompt=a_ )
model.generate(a_, max_new_tokens=10, do_sample=a_, streamer=a_ )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
UpperCamelCase__ = cs.out[:-1]
self.assertEqual(a_, a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = AutoTokenizer.from_pretrained("distilgpt2" )
UpperCamelCase__ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(a_ )
UpperCamelCase__ = -1
UpperCamelCase__ = torch.ones((1, 5), device=a_ ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
UpperCamelCase__ = TextStreamer(a_, skip_special_tokens=a_ )
model.generate(a_, max_new_tokens=1, do_sample=a_, streamer=a_ )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
UpperCamelCase__ = cs.out[:-1] # Remove the final "\n"
UpperCamelCase__ = tokenizer(a_, return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1) )
    def test_iterator_streamer_timeout(self):
        """The iterator streamer must raise queue.Empty when its timeout elapses."""
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will time out after 0.001 seconds, so an Empty exception is raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32_000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10_000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31_996,
        eos_token_id=31_999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """Configuration class for GPT-NeoX-Japanese models."""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
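# Minimal usage sketch (illustrative; defaults as listed in the signature):
#   config = GPTNeoXJapaneseConfig()
#   config.hidden_size  # -> 2560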
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    """Binary search the smallest index in v[l..r] with v[index] >= key."""
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r
def longest_increasing_subsequence_length(v: list[int]) -> int:
    """Return the length of the longest strictly increasing subsequence of v."""
    if len(v) == 0:
        return 0
    tail = [0] * len(v)
    length = 1
    tail[0] = v[0]
    for i in range(1, len(v)):
        if v[i] < tail[0]:
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            tail[length] = v[i]
            length += 1
        else:
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]
    return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
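# Worked example (illustrative, approximate values for water):
#   speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9)
#   ~ sqrt(2.15e9 / 998) ~ 1467.7 m/s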
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
import argparse
__lowercase: List[Any] = "docs/source/_static/js/custom.js"
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> List[Any]:
'''simple docstring'''
with open(_UpperCamelCase , encoding="utf-8" , newline="\n" ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
UpperCamelCase__ = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(_UpperCamelCase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(_UpperCamelCase )
if __name__ == "__main__":
__lowercase: List[Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
__lowercase: List[Any] = parser.parse_args()
update_custom_js(args.version)
| 31
|
'''simple docstring'''
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first `nth_term` terms of the P-series 1 + 1/2^p + 1/3^p + ..."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, power)}" if series else "1")
    return series
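# Example (illustrative):
#   p_series(5, 2) -> ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']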
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an original OpenAI GPT TF checkpoint into a PyTorch checkpoint."""
    # Construct the model from a config (default config if none is provided)
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
__lowercase: Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
__lowercase: int = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
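# Example invocation (hypothetical script name and paths):
#   python convert_openai_checkpoint.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-pytorch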
| 31
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"
    def align_with_features(self, features):
        """Return a copy of this template whose label schema matches `features`."""
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map dataset column names to the task's expected column names."""
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
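# Usage sketch (illustrative, assuming a Features object with a ClassLabel
# column named "labels"):
#   template = AudioClassification().align_with_features(ds.features)
#   template.column_mapping  # -> {"audio": "audio", "labels": "labels"}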
| 31
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowercase: Any = logging.get_logger(__name__)
__lowercase: List[Any] = {
"microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        """ResNet model configuration."""
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self):
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self):
        return 1e-3
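# Sketch (illustrative): stage names follow the depths list.
#   ResNetConfig(depths=[2, 2, 2, 2]).stage_names
#   # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']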
| 31
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
np_speech_inputs = np.random.rand(100, 32).astype(np.float32)
py_speech_inputs = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
self.assertTrue(np_processed.input_features.dtype == np.float32)
pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        """Load `num_samples` audio arrays from the dummy LibriSpeech dataset."""
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        """Feature extraction on a real sample should match recorded values."""
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
| 31
| 1
|
'''simple docstring'''
class SubArray:
    def __init__(self, arr):
        """Store the comma-separated numbers as a list of strings."""
        self.array = arr.split(",")

    def solve_sub_array(self):
        """Return the maximum contiguous sub-array sum (Kadane-style DP)."""
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
__lowercase: List[Any] = input("please input some numbers:")
__lowercase: int = SubArray(whole_array)
__lowercase: str = array.solve_sub_array()
print(("the results is:", re))
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1D array into a column vector."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Average covariance of the data centered within each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Covariance of class means around the overall mean, weighted by class size."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project `features` onto their top `dimensions` principal components."""
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project `features` onto `dimensions` linear discriminants."""
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must refuse to project onto more dimensions than classes."""
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA output must match the precomputed projection."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bs4_available,
is_coloredlogs_available,
is_datasets_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature: encodes to and decodes from {"bytes", "path"} structs."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def list_image_compression_formats() -> List[str]:
    """List the formats PIL can both open and save."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def encode_np_array(array: np.ndarray) -> dict:
    """Encode a numpy array as PNG/TIFF bytes via PIL."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
dest_dtype = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
dest_dtype = np.dtype(dest_dtype_str)
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed with NumPy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))
def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Euclidean distance computed in pure Python."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)
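# Example (illustrative): both implementations agree on a 3-4-5 triangle:
#   euclidean_distance([0, 0], [3, 4]) == euclidean_distance_no_np([0, 0], [3, 4]) == 5.0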
if __name__ == "__main__":
def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=1_00_00 , globals=globals() , ) )
benchmark()
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
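# --- Illustrative sketch (not part of the test class above) ---
# A minimal, self-contained walk-through of why the toy vocab/merges written in
# setUp() tokenize "lower" into ["lo", "w", "er</w>"]: merges are applied in
# rank order, and only adjacent symbol pairs that exactly match a merge rule
# are fused. This is a simplified greedy variant, not CLIP's implementation,
# and all names below are local to this sketch.
def apply_bpe(word: str, merges: list) -> list:
    # Start from single characters, with the end-of-word marker on the last one.
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    for first, second in merges:
        i = 0
        merged = []
        while i < len(symbols):
            if i < len(symbols) - 1 and symbols[i] == first and symbols[i + 1] == second:
                merged.append(first + second)
                i += 2
            else:
                merged.append(symbols[i])
                i += 1
        symbols = merged
    return symbols
if __name__ == "__main__":
    toy_merges = [("l", "o"), ("lo", "w</w>"), ("e", "r</w>")]
    print(apply_bpe("lower", toy_merges))  # ['lo', 'w', 'er</w>'] -- matches the expected tokens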
'''simple docstring'''
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowercase: Dict = imread(r"digital_image_processing/image_data/lena_small.jpg")
__lowercase: Optional[int] = cvtColor(img, COLOR_BGR2GRAY)
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = cn.convert_to_negative(_UpperCamelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
'''simple docstring'''
with Image.open("digital_image_processing/image_data/lena_small.jpg" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(_UpperCamelCase , 1_10 ) ).startswith(
"<PIL.Image.Image image mode=RGB size=100x100 at" )
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
UpperCamelCase__ = imread("digital_image_processing/image_data/lena_small.jpg" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
UpperCamelCase__ = canny.canny(_UpperCamelCase )
# assert canny array for at least one True
assert canny_array.any()
def SCREAMING_SNAKE_CASE__( ) -> Any:
'''simple docstring'''
assert gg.gaussian_filter(_UpperCamelCase , 5 , sigma=0.9 ).all()
def SCREAMING_SNAKE_CASE__( ) -> int:
'''simple docstring'''
UpperCamelCase__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
UpperCamelCase__ = conv.img_convolve(_UpperCamelCase , _UpperCamelCase ).astype(_UpperCamelCase )
assert res.any()
def SCREAMING_SNAKE_CASE__( ) -> List[Any]:
'''simple docstring'''
assert med.median_filter(_UpperCamelCase , 3 ).any()
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = sob.sobel_filter(_UpperCamelCase )
assert grad.any() and theta.any()
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = sp.make_sepia(_UpperCamelCase , 20 )
assert sepia.all()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> str:
'''simple docstring'''
UpperCamelCase__ = bs.Burkes(imread(_UpperCamelCase , 1 ) , 1_20 )
burkes.process()
assert burkes.output_img.any()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = rs.NearestNeighbour(imread(_UpperCamelCase , 1 ) , 4_00 , 2_00 )
nn.process()
assert nn.output.any()
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = "digital_image_processing/image_data/lena.jpg"
# Reading the image and converting it to grayscale.
UpperCamelCase__ = imread(_UpperCamelCase , 0 )
# Test for get_neighbors_pixel function() return not None
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = image[x_coordinate][y_coordinate]
UpperCamelCase__ = lbp.get_neighbors_pixel(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
UpperCamelCase__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
UpperCamelCase__ = lbp.local_binary_value(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
assert lbp_image.any()
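# --- Illustrative sketch (separate from the tests above) ---
# A tiny, self-contained take on the local-binary-pattern idea the last test
# exercises: compare the 8 neighbours of a pixel against the centre value and
# pack the comparisons into one byte. This follows the classic LBP formulation
# as an assumption; it is not a copy of the repo's `lbp` module, and all names
# here are local to the sketch.
import numpy as np
def lbp_value(image: np.ndarray, x: int, y: int) -> int:
    center = image[x, y]
    # Neighbour offsets, clockwise from the top-left corner.
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1] and image[nx, ny] >= center:
            value |= 1 << bit
    return value
if __name__ == "__main__":
    demo = np.array([[10, 20, 30], [40, 50, 60], [70, 80, 90]], dtype=np.uint8)
    print(lbp_value(demo, 1, 1))  # each neighbour >= 50 contributes one bit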
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
'The name of the training data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
        + F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , label2id=_UpperCamelCase , id2label=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
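# --- Illustrative sketch (not executed by the script above) ---
# What the `random_subsample` helper near the top of this script does, shown on
# a synthetic waveform: clips longer than `max_length` seconds are cropped at a
# random offset, shorter ones pass through unchanged. Pure numpy; the sample
# rate and lengths below are made up for the demo.
import numpy as np
from random import randint
def crop_wav(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    offset = randint(0, len(wav) - sample_length - 1)
    return wav[offset : offset + sample_length]
if __name__ == "__main__":
    wav = np.random.randn(5 * 16_000)        # 5 s of fake audio
    cropped = crop_wav(wav, max_length=2.0)  # -> a 2 s window at a random offset
    print(len(wav), len(cropped))            # 80000 32000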
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__lowercase: Union[str, Any] = logging.get_logger(__name__)
__lowercase: int = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
"constant": get_constant_schedule,
"constant_w_warmup": get_constant_schedule_with_warmup,
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : str, a_ : Optional[int]=None, a_ : str=None, *a_ : str, **a_ : str ):
"""simple docstring"""
super().__init__(*a_, **a_ )
if config is None:
assert isinstance(self.model, a_ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
f' {self.model.__class__}'
)
UpperCamelCase__ = self.model.config
else:
UpperCamelCase__ = config
UpperCamelCase__ = data_args
        UpperCamelCase__ = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
f'The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for'
" padding.." )
if self.args.label_smoothing == 0:
UpperCamelCase__ = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
UpperCamelCase__ = label_smoothed_nll_loss
def lowercase_ ( self : Dict, a_ : int ):
"""simple docstring"""
if self.optimizer is None:
UpperCamelCase__ = ["bias", "LayerNorm.weight"]
UpperCamelCase__ = [
{
"params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
"weight_decay": self.args.weight_decay,
},
{
"params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
UpperCamelCase__ = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
UpperCamelCase__ = Adafactor
UpperCamelCase__ = {"scale_parameter": False, "relative_step": False}
else:
UpperCamelCase__ = AdamW
UpperCamelCase__ = {
"betas": (self.args.adam_betaa, self.args.adam_betaa),
"eps": self.args.adam_epsilon,
}
UpperCamelCase__ = self.args.learning_rate
if self.sharded_ddp:
UpperCamelCase__ = OSS(
params=a_, optim=a_, **a_, )
else:
UpperCamelCase__ = optimizer_cls(a_, **a_ )
if self.lr_scheduler is None:
UpperCamelCase__ = self._get_lr_scheduler(a_ )
else: # ignoring --lr_scheduler
logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored." )
def lowercase_ ( self : Dict, a_ : List[str] ):
"""simple docstring"""
UpperCamelCase__ = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
UpperCamelCase__ = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
UpperCamelCase__ = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps )
else:
UpperCamelCase__ = schedule_func(
self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=a_ )
return scheduler
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
if isinstance(self.train_dataset, torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size, distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED), )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def lowercase_ ( self : List[Any], a_ : Any, a_ : Union[str, Any], a_ : List[str] ):
"""simple docstring"""
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
UpperCamelCase__ = model(**a_, use_cache=a_ )[0]
UpperCamelCase__ = self.loss_fn(logits.view(-1, logits.shape[-1] ), labels.view(-1 ) )
else:
# compute usual loss via models
UpperCamelCase__ , UpperCamelCase__ = model(**a_, labels=a_, use_cache=a_ )[:2]
else:
# compute label smoothed loss
UpperCamelCase__ = model(**a_, use_cache=a_ )[0]
UpperCamelCase__ = torch.nn.functional.log_softmax(a_, dim=-1 )
UpperCamelCase__ , UpperCamelCase__ = self.loss_fn(a_, a_, self.args.label_smoothing, ignore_index=self.config.pad_token_id )
return loss, logits
def lowercase_ ( self : Any, a_ : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = inputs.pop("labels" )
UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(a_, a_, a_ )
return loss
def lowercase_ ( self : Optional[int], a_ : nn.Module, a_ : Dict[str, Union[torch.Tensor, Any]], a_ : bool, a_ : Optional[List[str]] = None, ):
"""simple docstring"""
UpperCamelCase__ = self._prepare_inputs(a_ )
UpperCamelCase__ = {
"max_length": self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
"num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
UpperCamelCase__ = self.model.generate(
inputs["input_ids"], attention_mask=inputs["attention_mask"], **a_, )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ = self._pad_tensors_to_max_len(a_, gen_kwargs["max_length"] )
UpperCamelCase__ = inputs.pop("labels" )
with torch.no_grad():
# compute loss on predict data
UpperCamelCase__ , UpperCamelCase__ = self._compute_loss(a_, a_, a_ )
UpperCamelCase__ = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
UpperCamelCase__ = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
UpperCamelCase__ = self._pad_tensors_to_max_len(a_, gen_kwargs["max_length"] )
return (loss, logits, labels)
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
"Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
f' padded to `max_length`={max_length}' )
UpperCamelCase__ = pad_token_id * torch.ones(
(tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device )
UpperCamelCase__ = tensor
return padded_tensor
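# --- Illustrative sketch (separate from the trainer above) ---
# The `_pad_tensors_to_max_len` step in miniature: allocate a (batch, max_length)
# tensor filled with the pad id and copy the shorter sequences into its prefix,
# so generated batches of different widths can be stacked. Names and values
# below are local to the sketch.
import torch
def pad_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor
    return padded
if __name__ == "__main__":
    generated = torch.tensor([[5, 6, 7], [8, 9, 1]])
    print(pad_to_max_len(generated, max_length=5, pad_token_id=0))
    # tensor([[5, 6, 7, 0, 0],
    #         [8, 9, 1, 0, 0]])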
'''simple docstring'''
import math
import sys
def read_file_binary(file_path: str) -> str:
    '''simple docstring'''
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
            for dat in data:
                curr_byte = f'{dat:08b}'
                result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def decompress_data(data_bits: str) -> str:
    '''simple docstring'''
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)
    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"
        if math.log2(index).is_integer():
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex
        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result
def write_file_binary(file_path: str, to_write: str) -> None:
    '''simple docstring'''
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]
            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    '''simple docstring'''
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str, destination_path: str) -> None:
    '''simple docstring'''
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
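# --- Illustrative sketch (separate from the CLI above) ---
# Two of the building blocks above, shown on in-memory data instead of files:
# the byte -> bit-string conversion from `read_file_binary`, and the "append a
# 1 then zero-fill" padding from `write_file_binary` that marks where the real
# payload ends. All names here are local to the sketch.
def bytes_to_bits(data: bytes) -> str:
    return "".join(f"{byte:08b}" for byte in data)
def pad_to_byte(bits: str, byte_length: int = 8) -> str:
    # A lone "1" followed by zeros is appended so a decoder can strip the
    # padding unambiguously: drop trailing zeros, then drop one "1".
    if len(bits) % byte_length == 0:
        return bits + "1" + "0" * (byte_length - 1)
    return bits + "1" + "0" * (byte_length - len(bits) % byte_length - 1)
if __name__ == "__main__":
    print(bytes_to_bits(b"\x05\xff"))  # '0000010111111111'
    print(pad_to_byte("10110"))        # '10110100' -- now a whole byte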
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowercase: List[str] = logging.get_logger(__name__)
def get_resize_output_image_size(_UpperCamelCase : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(_UpperCamelCase)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
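# --- Illustrative sketch (independent of the image processor class below) ---
# The size-snapping rule used above, in isolation: round a target dimension to
# the nearest multiple, then push it back inside [min_val, max_val] by flooring
# or ceiling as needed. The values below are made up for the demo.
import math as _math
def snap_to_multiple(val: float, multiple: int, min_val: int = 0, max_val: int = None) -> int:
    x = round(val / multiple) * multiple
    if max_val is not None and x > max_val:
        x = _math.floor(val / multiple) * multiple
    if x < min_val:
        x = _math.ceil(val / multiple) * multiple
    return x
if __name__ == "__main__":
    print(snap_to_multiple(383.5, 32))               # 384 -- nearest multiple of 32
    print(snap_to_multiple(383.5, 32, max_val=368))  # 352 -- floored to respect the cap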
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = ['pixel_values']
def __init__( self : str, a_ : bool = True, a_ : Dict[str, int] = None, a_ : PILImageResampling = PILImageResampling.BILINEAR, a_ : bool = False, a_ : int = 1, a_ : bool = True, a_ : Union[int, float] = 1 / 255, a_ : bool = True, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, **a_ : List[str], ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = size if size is not None else {"height": 384, "width": 384}
UpperCamelCase__ = get_size_dict(a_ )
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of
UpperCamelCase__ = resample
UpperCamelCase__ = do_rescale
UpperCamelCase__ = rescale_factor
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCamelCase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase_ ( self : List[str], a_ : np.ndarray, a_ : Dict[str, int], a_ : bool = False, a_ : int = 1, a_ : PILImageResampling = PILImageResampling.BICUBIC, a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : int, ):
"""simple docstring"""
UpperCamelCase__ = get_size_dict(a_ )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
UpperCamelCase__ = get_resize_output_image_size(
a_, output_size=(size["height"], size["width"]), keep_aspect_ratio=a_, multiple=a_, )
return resize(a_, size=a_, resample=a_, data_format=a_, **a_ )
def lowercase_ ( self : int, a_ : np.ndarray, a_ : Union[int, float], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Tuple, ):
"""simple docstring"""
return rescale(a_, scale=a_, data_format=a_, **a_ )
def lowercase_ ( self : List[Any], a_ : np.ndarray, a_ : Union[float, List[float]], a_ : Union[float, List[float]], a_ : Optional[Union[str, ChannelDimension]] = None, **a_ : Dict, ):
"""simple docstring"""
return normalize(a_, mean=a_, std=a_, data_format=a_, **a_ )
def lowercase_ ( self : Optional[Any], a_ : ImageInput, a_ : bool = None, a_ : int = None, a_ : bool = None, a_ : int = None, a_ : PILImageResampling = None, a_ : bool = None, a_ : float = None, a_ : bool = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[float, List[float]]] = None, a_ : Optional[Union[str, TensorType]] = None, a_ : ChannelDimension = ChannelDimension.FIRST, **a_ : str, ):
"""simple docstring"""
UpperCamelCase__ = do_resize if do_resize is not None else self.do_resize
UpperCamelCase__ = size if size is not None else self.size
UpperCamelCase__ = get_size_dict(a_ )
UpperCamelCase__ = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
UpperCamelCase__ = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
UpperCamelCase__ = resample if resample is not None else self.resample
UpperCamelCase__ = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase__ = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase__ = image_mean if image_mean is not None else self.image_mean
UpperCamelCase__ = image_std if image_std is not None else self.image_std
UpperCamelCase__ = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCamelCase__ = [to_numpy_array(a_ ) for image in images]
if do_resize:
UpperCamelCase__ = [self.resize(image=a_, size=a_, resample=a_ ) for image in images]
if do_rescale:
UpperCamelCase__ = [self.rescale(image=a_, scale=a_ ) for image in images]
if do_normalize:
UpperCamelCase__ = [self.normalize(image=a_, mean=a_, std=a_ ) for image in images]
UpperCamelCase__ = [to_channel_dimension_format(a_, a_ ) for image in images]
UpperCamelCase__ = {"pixel_values": images}
return BatchFeature(data=a_, tensor_type=a_ )
def lowercase_ ( self : Union[str, Any], a_ : int, a_ : List[Tuple] = None ):
"""simple docstring"""
UpperCamelCase__ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(a_ ) != len(a_ ):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits" )
if is_torch_tensor(a_ ):
UpperCamelCase__ = target_sizes.numpy()
UpperCamelCase__ = []
for idx in range(len(a_ ) ):
UpperCamelCase__ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ), size=target_sizes[idx], mode="bilinear", align_corners=a_ )
UpperCamelCase__ = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(a_ )
else:
UpperCamelCase__ = logits.argmax(dim=1 )
UpperCamelCase__ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
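# --- Illustrative sketch (separate from the image processor above) ---
# The core of the semantic-segmentation post-processing: upsample per-image
# logits to each target size with bilinear interpolation, then argmax over the
# class dimension. Shapes below are arbitrary demo values, and align_corners is
# assumed False here.
import torch
if __name__ == "__main__":
    logits = torch.randn(2, 21, 32, 32)  # (batch, num_classes, h, w)
    target_sizes = [(128, 96), (64, 64)]
    maps = []
    for idx, size in enumerate(target_sizes):
        resized = torch.nn.functional.interpolate(
            logits[idx].unsqueeze(0), size=size, mode="bilinear", align_corners=False
        )
        maps.append(resized[0].argmax(dim=0))  # (h, w) map of class indices
    print([m.shape for m in maps])             # [torch.Size([128, 96]), torch.Size([64, 64])]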
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
    def __init__( self : Any, a_ : VQModel, a_ : UNet2DModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
# decode the image latents with the VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
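# --- Illustrative sketch (separate from the pipeline above) ---
# The introspection trick the pipeline uses to decide whether to forward `eta`:
# inspect the scheduler step's signature and only pass kwargs it actually
# accepts. Shown here on plain functions rather than a real scheduler; all
# names are local to the sketch.
import inspect
def step_with_eta(sample, eta=0.0):
    return sample
def step_without_eta(sample):
    return sample
def build_extra_kwargs(step_fn, eta):
    extra = {}
    if "eta" in set(inspect.signature(step_fn).parameters.keys()):
        extra["eta"] = eta
    return extra
if __name__ == "__main__":
    print(build_extra_kwargs(step_with_eta, 0.5))     # {'eta': 0.5}
    print(build_extra_kwargs(step_without_eta, 0.5))  # {}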
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
    _lowerCamelCase : Dict = Speech2TextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
        UpperCamelCase__ = Speech2TextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.float64 )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
        self.assertTrue(np_processed.input_features.dtype == np.float32 )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
        self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
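# --- Illustrative sketch (separate from the tests above) ---
# What `_check_zero_mean_unit_variance` asserts, demonstrated directly: after
# per-feature normalisation, every column of the feature matrix has mean ~0 and
# variance ~1. Pure numpy, with made-up data; names are local to the sketch.
import numpy as np
if __name__ == "__main__":
    features = np.random.rand(100, 24) * 5.0 + 3.0  # (frames, mel bins)
    normalized = (features - features.mean(axis=0)) / (features.std(axis=0) + 1e-10)
    assert np.all(np.abs(np.mean(normalized, axis=0)) < 1e-3)
    assert np.all(np.abs(np.var(normalized, axis=0) - 1) < 1e-3)
    print("zero mean, unit variance per mel bin")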
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
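# --- Illustrative sketch (an alternative to the curl subprocess above) ---
# The same GitHub runners query via the `requests` library instead of shelling
# out to curl. The endpoint and response shape mirror what the script above
# parses; the token value is a placeholder and this helper is hypothetical.
import requests
def fetch_offline_runners(target_runners, token):
    response = requests.get(
        "https://api.github.com/repos/huggingface/transformers/actions/runners",
        headers={"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"},
    )
    runners = response.json()["runners"]
    return [r for r in runners if r["name"] in target_runners and r["status"] == "offline"]
if __name__ == "__main__":
    # fetch_offline_runners(["single-gpu-runner"], "<token>")  # requires a real token
    pass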
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
__lowercase: Dict = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"""{bindir}/../../examples/pytorch/translation"""):
from run_translation import main # noqa
set_seed(42)
__lowercase: Optional[Any] = "sshleifer/student_marian_en_ro_6_1"
__lowercase: Union[str, Any] = "sshleifer/tiny-mbart"
@require_torch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : str, a_ : int=False, a_ : Any=None, a_ : Any=True, a_ : Optional[int]=True, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = self.run_trainer(
eval_steps=1, max_len=12, model_name=a_, num_train_epochs=1, distributed=a_, extra_args_str=a_, predict_with_generate=a_, do_train=a_, do_eval=a_, do_predict=a_, )
UpperCamelCase__ = TrainerState.load_from_json(os.path.join(a_, "trainer_state.json" ) ).log_history
if not do_eval:
return
UpperCamelCase__ = [log for log in logs if "eval_loss" in log.keys()]
UpperCamelCase__ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCamelCase__ = eval_metrics[-1]
assert isinstance(last_step_stats["eval_bleu"], a_ )
assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_ )
@require_torch_multi_gpu
def lowercase_ ( self : Dict ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowercase_ ( self : Tuple ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp simple" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowercase_ ( self : List[str] ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp simple --fp16" )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowercase_ ( self : Tuple ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=a_ )
@unittest.skip("Requires an update of the env running those tests" )
@require_torch_multi_gpu
@require_fairscale
def lowercase_ ( self : str ):
"""simple docstring"""
self.run_seqaseq_quick(
distributed=a_, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=a_ )
@require_apex
@require_torch_gpu
def lowercase_ ( self : str ):
"""simple docstring"""
self.run_seqaseq_quick(distributed=a_, extra_args_str="--fp16 --fp16_backend=apex" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=a_, extra_args_str="--fp16 --fp16_backend=apex" )
@parameterized.expand(["base", "low", "high", "mixed"] )
@require_torch_multi_gpu
def lowercase_ ( self : List[Any], a_ : Any ):
"""simple docstring"""
UpperCamelCase__ = {
# test with the default log_level - should be info and thus log info once
"base": {"extra_args_str": "", "n_matches": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
}
UpperCamelCase__ = experiments[experiment_id]
UpperCamelCase__ = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
UpperCamelCase__ = "Running training"
with CaptureStderr() as cl:
self.run_seqaseq_quick(**a_, extra_args_str=data["extra_args_str"] )
UpperCamelCase__ = len(re.findall(a_, cl.err ) )
self.assertEqual(a_, data["n_matches"] )
@slow
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.run_trainer(
eval_steps=2, max_len=128, model_name=a_, learning_rate=3e-4, num_train_epochs=10, distributed=a_, )
# Check metrics
UpperCamelCase__ = TrainerState.load_from_json(os.path.join(a_, "trainer_state.json" ) ).log_history
UpperCamelCase__ = [log for log in logs if "eval_loss" in log.keys()]
UpperCamelCase__ = eval_metrics[0]
UpperCamelCase__ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats["eval_bleu"], a_ )
# test if do_predict saves generations and metrics
UpperCamelCase__ = os.listdir(a_ )
UpperCamelCase__ = {os.path.basename(a_ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowercase_ ( self : str ):
"""simple docstring"""
from transformers.training_args import OptimizerNames
def train_and_return_metrics(a_ : str ) -> Tuple[int, float]:
UpperCamelCase__ = "--skip_memory_metrics 0"
UpperCamelCase__ = self.run_trainer(
max_len=128, model_name=a_, learning_rate=3e-4, num_train_epochs=1, optim=a_, distributed=a_, extra_args_str=a_, do_eval=a_, do_predict=a_, n_gpus_to_use=1, )
# Check metrics
UpperCamelCase__ = TrainerState.load_from_json(Path(a_, "trainer_state.json" ) ).log_history
UpperCamelCase__ = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 )
UpperCamelCase__ = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 )
UpperCamelCase__ = logs[0]["train_loss"]
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCamelCase__ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCamelCase__ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCamelCase__ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCamelCase__ = gpu_total_mem_orig - gpu_total_mem_bnb
        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which are `nn.Embedding`, which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes, and the diff in optimizer memory usage is derived as follows:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
UpperCamelCase__ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
a_, a_, "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
f' a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and'
f' gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB', )
self.assertGreater(
a_, a_, "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
f' a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and'
f' gpu_total_mem_bnb={gpu_total_mem_bnb}MB', )
self.assertEqual(
a_, a_, f'loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}' )
def lowercase_ ( self : Union[str, Any], a_ : int, a_ : str, a_ : int, a_ : float = 3e-3, a_ : str = "adafactor", a_ : bool = False, a_ : str = None, a_ : int = 0, a_ : bool = True, a_ : bool = True, a_ : bool = True, a_ : bool = True, a_ : int = None, ):
"""simple docstring"""
UpperCamelCase__ = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
UpperCamelCase__ = self.get_auto_remove_tmp_dir()
        UpperCamelCase__ = f'\n --model_name_or_path {model_name}\n --train_file {data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(a_ )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(a_ )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n '.split()
        UpperCamelCase__ = f'\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(a_ )}\n '.split()
UpperCamelCase__ = "\n --do_predict\n ".split()
UpperCamelCase__ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f'--optim {optim}'.split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCamelCase__ = get_gpu_count()
UpperCamelCase__ = get_torch_dist_unique_port()
UpperCamelCase__ = f'\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n '.split()
UpperCamelCase__ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a_, env=self.get_env() )
else:
UpperCamelCase__ = ["run_translation.py"] + args
with patch.object(a_, "argv", a_ ):
main()
return output_dir
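# Illustrative sketch (not part of the upstream test): the log_level experiments
# above count how many times an info string appears in captured stderr; the
# sample stderr below is made up for demonstration.
import re as _re_sketch

_sample_stderr = "Running training\nstep 1\nRunning training\n"
assert len(_re_sketch.findall("Running training", _sample_stderr)) == 2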
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
                raise ValueError(
                    F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
                    F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
        elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
            raise ValueError(
                F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
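# Minimal usage sketch, assuming the two functions above are exposed under
# their upstream names `datasets.interleave_datasets` and
# `datasets.concatenate_datasets`; the toy columns are illustrative only.
#
#   from datasets import Dataset, concatenate_datasets, interleave_datasets
#   dset_a = Dataset.from_dict({"n": [0, 1, 2]})
#   dset_b = Dataset.from_dict({"n": [10, 11, 12]})
#   concatenate_datasets([dset_a, dset_b])                                  # 6 rows
#   interleave_datasets([dset_a, dset_b], stopping_strategy="all_exhausted")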
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(_UpperCamelCase )
_validate_point(_UpperCamelCase )
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(a - b ) for a, b in zip(_UpperCamelCase , _UpperCamelCase ) ) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[float] ) -> None:
'''simple docstring'''
if point:
if isinstance(_UpperCamelCase , _UpperCamelCase ):
for item in point:
if not isinstance(_UpperCamelCase , (int, float) ):
UpperCamelCase__ = (
"Expected a list of numbers as input, found "
F'{type(_UpperCamelCase ).__name__}'
)
raise TypeError(_UpperCamelCase )
else:
UpperCamelCase__ = F'Expected a list of numbers as input, found {type(_UpperCamelCase ).__name__}'
raise TypeError(_UpperCamelCase )
else:
raise ValueError("Missing an input" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list , _UpperCamelCase : list ) -> float:
'''simple docstring'''
_validate_point(_UpperCamelCase )
_validate_point(_UpperCamelCase )
if len(_UpperCamelCase ) != len(_UpperCamelCase ):
raise ValueError("Both points must be in the same n-dimensional space" )
return float(sum(abs(x - y ) for x, y in zip(_UpperCamelCase , _UpperCamelCase ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
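# Self-contained sketch of the same L1 ("Manhattan") distance computed above;
# the helper name and sample points are illustrative only.
def _manhattan_sketch(point_a: list, point_b: list) -> float:
    # sum of absolute coordinate-wise differences
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))

assert _manhattan_sketch([1, 1], [2, 2]) == 2.0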
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
if __name__ == "__main__":
import doctest
doctest.testmod()
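# Equivalent O(n log n) sketch using a min-heap instead of the repeated
# `min()`/`index()` scans above; this is an alternative formulation, not the
# author's version, and the sample costs are illustrative.
import heapq

def _optimal_merge_heap(files: list) -> float:
    heapq.heapify(files)
    cost = 0
    while len(files) > 1:
        # always merge the two currently-cheapest files
        merged = heapq.heappop(files) + heapq.heappop(files)
        cost += merged
        heapq.heappush(files, merged)
    return cost

assert _optimal_merge_heap([2, 3, 4]) == 14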
'''simple docstring'''
from __future__ import annotations
__lowercase: int = 10
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ = 1
UpperCamelCase__ = max(_UpperCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
UpperCamelCase__ = [[] for _ in range(_UpperCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
UpperCamelCase__ = int((i / placement) % RADIX )
buckets[tmp].append(_UpperCamelCase )
        # put each bucket's contents back into list_of_ints
UpperCamelCase__ = 0
for b in range(_UpperCamelCase ):
for i in buckets[b]:
UpperCamelCase__ = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
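# Worked example (illustrative): with RADIX = 10, [45, 75, 90, 2] is first
# bucketed by ones digits into [90, 2, 45, 75], then by tens digits into the
# sorted [2, 45, 75, 90].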
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
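# Note: the state-space tree enumerates all n! orderings, so the two calls
# above print 4! = 24 and 3! = 6 permutations respectively.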
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Any = IFInpaintingPipeline
_lowerCamelCase : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
_lowerCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_lowerCamelCase : Dict = PipelineTesterMixin.required_optional_params - {'latents'}
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
return self._get_dummy_components()
def lowercase_ ( self : Dict, a_ : Union[str, Any], a_ : Tuple=0 ):
"""simple docstring"""
if str(a_ ).startswith("mps" ):
UpperCamelCase__ = torch.manual_seed(a_ )
else:
UpperCamelCase__ = torch.Generator(device=a_ ).manual_seed(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = floats_tensor((1, 3, 32, 32), rng=random.Random(a_ ) ).to(a_ )
UpperCamelCase__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
def lowercase_ ( self : Any ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def lowercase_ ( self : str ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA" )
def lowercase_ ( self : Dict ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def lowercase_ ( self : int ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
self._test_save_load_local()
def lowercase_ ( self : str ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2, )
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
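# Minimal usage sketch, assuming the config class above is exposed under its
# upstream name `YolosConfig`; the overrides shown are illustrative.
#
#   from transformers import YolosConfig
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   config.hidden_size  # 768 by default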
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCAmelCase :
def __init__( self : List[Any], a_ : Tuple, a_ : Tuple=3, a_ : Optional[Any]=32, a_ : int=3, a_ : Any=10, a_ : Any=[10, 20, 30, 40], a_ : int=[1, 1, 2, 1], a_ : Optional[Any]=True, a_ : Optional[int]=True, a_ : List[str]="relu", a_ : Optional[int]=3, a_ : Dict=None, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = image_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = embeddings_size
UpperCamelCase__ = hidden_sizes
UpperCamelCase__ = depths
UpperCamelCase__ = is_training
UpperCamelCase__ = use_labels
UpperCamelCase__ = hidden_act
UpperCamelCase__ = num_labels
UpperCamelCase__ = scope
UpperCamelCase__ = len(a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ = None
if self.use_labels:
UpperCamelCase__ = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : str ):
"""simple docstring"""
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, )
def lowercase_ ( self : Any, a_ : Dict, a_ : str, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = TFRegNetModel(config=a_ )
UpperCamelCase__ = model(a_, training=a_ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def lowercase_ ( self : int, a_ : List[Any], a_ : List[str], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.num_labels
UpperCamelCase__ = TFRegNetForImageClassification(a_ )
UpperCamelCase__ = model(a_, labels=a_, training=a_ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : int = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
_lowerCamelCase : int = (
{'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification}
if is_tf_available()
else {}
)
_lowerCamelCase : Optional[int] = False
_lowerCamelCase : str = False
_lowerCamelCase : Any = False
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = TFRegNetModelTester(self )
UpperCamelCase__ = ConfigTester(self, config_class=a_, has_text_modality=a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def lowercase_ ( self : int ):
"""simple docstring"""
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("GPU" ) ) == 0, reason="TF does not support backprop for grouped convolutions on CPU.", )
@slow
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_keras_fit()
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
pass
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a_ )
UpperCamelCase__ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ = [*signature.parameters.keys()]
UpperCamelCase__ = ["pixel_values"]
self.assertListEqual(arg_names[:1], a_ )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
def check_hidden_states_output(a_ : List[Any], a_ : Optional[int], a_ : List[Any] ):
UpperCamelCase__ = model_class(a_ )
UpperCamelCase__ = model(**self._prepare_for_class(a_, a_ ), training=a_ )
UpperCamelCase__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ = self.model_tester.num_stages
self.assertEqual(len(a_ ), expected_num_stages + 1 )
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], )
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
UpperCamelCase__ = layer_type
UpperCamelCase__ = True
check_hidden_states_output(a_, a_, a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ = True
check_hidden_states_output(a_, a_, a_ )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(a_ : Dict, a_ : Any, a_ : Optional[int], a_ : str={} ):
UpperCamelCase__ = model(a_, return_dict=a_, **a_ )
UpperCamelCase__ = model(a_, return_dict=a_, **a_ ).to_tuple()
def recursive_check(a_ : Optional[Any], a_ : Tuple ):
if isinstance(a_, (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(a_, a_ ):
recursive_check(a_, a_ )
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(a_, a_ ) ), msg=(
"Tuple and dict output are not equal. Difference:"
f' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}'
), )
recursive_check(a_, a_ )
for model_class in self.all_model_classes:
UpperCamelCase__ = model_class(a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_ )
check_equivalence(a_, a_, a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
check_equivalence(a_, a_, a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_ )
check_equivalence(a_, a_, a_, {"output_hidden_states": True} )
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
UpperCamelCase__ = self._prepare_for_class(a_, a_, return_labels=a_ )
check_equivalence(a_, a_, a_, {"output_hidden_states": True} )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def lowercase_ ( self : int ):
"""simple docstring"""
for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ = TFRegNetModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class UpperCAmelCase ( unittest.TestCase):
@cached_property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
UpperCamelCase__ = self.default_image_processor
UpperCamelCase__ = prepare_img()
UpperCamelCase__ = image_processor(images=a_, return_tensors="tf" )
# forward pass
UpperCamelCase__ = model(**a_, training=a_ )
# verify the logits
UpperCamelCase__ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, a_ )
UpperCamelCase__ = tf.constant([-0.4_180, -1.5_051, -3.4_836] )
tf.debugging.assert_near(outputs.logits[0, :3], a_, atol=1e-4 )
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(_UpperCamelCase : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
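# Illustrative renamings performed by the function above (hypothetical keys):
#   ("ln", "weight")                  -> ("ln", "scale")              # layer norm
#   ("conv", "weight"), 4D tensor     -> ("conv", "kernel"), OIHW -> HWIO
#   ("dense", "weight"), 2D tensor    -> ("dense", "kernel"), transposed
#   ("emb", "weight"), embedding-like -> ("emb", "embedding")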
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
        # load each shard checkpoint with torch
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
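# Shape sanity-check for the conv-kernel transpose used above (illustrative):
# Flax stores conv kernels as HWIO while PyTorch expects OIHW.
import numpy as _np_sketch

_hwio = _np_sketch.zeros((3, 3, 16, 32))           # H, W, I, O
_oihw = _np_sketch.transpose(_hwio, (3, 2, 0, 1))  # -> O, I, H, W
assert _oihw.shape == (32, 16, 3, 3)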
'''simple docstring'''
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
__lowercase: int = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, *a_ : Tuple, **a_ : List[str] ):
"""simple docstring"""
super().__init__(*a_, **a_ )
requires_backends(self, "decord" )
self.check_model_type(a_ )
def lowercase_ ( self : Optional[Any], a_ : str=None, a_ : Optional[Any]=None, a_ : Dict=None ):
"""simple docstring"""
UpperCamelCase__ = {}
if frame_sampling_rate is not None:
UpperCamelCase__ = frame_sampling_rate
if num_frames is not None:
UpperCamelCase__ = num_frames
UpperCamelCase__ = {}
if top_k is not None:
UpperCamelCase__ = top_k
return preprocess_params, {}, postprocess_params
def __call__( self : Optional[Any], a_ : Union[str, List[str]], **a_ : str ):
"""simple docstring"""
return super().__call__(a_, **a_ )
def lowercase_ ( self : List[Any], a_ : List[str], a_ : Union[str, Any]=None, a_ : Optional[Any]=1 ):
"""simple docstring"""
if num_frames is None:
UpperCamelCase__ = self.model.config.num_frames
if video.startswith("http://" ) or video.startswith("https://" ):
UpperCamelCase__ = BytesIO(requests.get(a_ ).content )
UpperCamelCase__ = VideoReader(a_ )
videoreader.seek(0 )
UpperCamelCase__ = 0
UpperCamelCase__ = num_frames * frame_sampling_rate - 1
UpperCamelCase__ = np.linspace(a_, a_, num=a_, dtype=np.intaa )
UpperCamelCase__ = videoreader.get_batch(a_ ).asnumpy()
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = self.image_processor(a_, return_tensors=self.framework )
return model_inputs
def lowercase_ ( self : str, a_ : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.model(**a_ )
return model_outputs
def lowercase_ ( self : Tuple, a_ : Any, a_ : Dict=5 ):
"""simple docstring"""
if top_k > self.model.config.num_labels:
UpperCamelCase__ = self.model.config.num_labels
if self.framework == "pt":
UpperCamelCase__ = model_outputs.logits.softmax(-1 )[0]
UpperCamelCase__ , UpperCamelCase__ = probs.topk(a_ )
else:
raise ValueError(f'Unsupported framework: {self.framework}' )
UpperCamelCase__ = scores.tolist()
UpperCamelCase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a_, a_ )]
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
'''simple docstring'''
import random
class UpperCAmelCase :
@staticmethod
def lowercase_ ( a_ : str ):
"""simple docstring"""
        UpperCamelCase__ = [ord(i ) for i in text]
UpperCamelCase__ = []
UpperCamelCase__ = []
for i in plain:
UpperCamelCase__ = random.randint(1, 300 )
UpperCamelCase__ = (i + k) * k
cipher.append(a_ )
key.append(a_ )
return cipher, key
@staticmethod
def lowercase_ ( a_ : list[int], a_ : list[int] ):
"""simple docstring"""
UpperCamelCase__ = []
for i in range(len(a_ ) ):
UpperCamelCase__ = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(a_ ) )
return "".join(a_ )
if __name__ == "__main__":
__lowercase ,__lowercase: Optional[int] = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
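# Why decryption recovers the plaintext (illustrative algebra): encryption
# computes c = (p + k) * k, so (c - k**2) / k = (p*k + k**2 - k**2) / k = p.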
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
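# Worked example (illustrative, arbitrary units): passing electron_conc=25,
# hole_conc=100 and intrinsic_conc=0 returns ("intrinsic_conc", 50.0), since
# the mass-action law gives n_i = (25 * 100) ** 0.5 = 50.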
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
__lowercase: List[Any] = pytest.mark.integration
@require_faiss
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
        UpperCamelCase__ = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def lowercase_ ( self : List[str] ):
"""simple docstring"""
import faiss
UpperCamelCase__ = self._create_dummy_dataset()
UpperCamelCase__ = dset.map(
lambda a_, a_ : {"vecs": i * np.ones(5, dtype=np.floataa )}, with_indices=a_, keep_in_memory=a_ )
UpperCamelCase__ = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCamelCase__ , UpperCamelCase__ = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples["filename"][0], "my_name-train_29" )
dset.drop_index("vecs" )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
import faiss
UpperCamelCase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name="vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT, )
UpperCamelCase__ , UpperCamelCase__ = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples["filename"][0], "my_name-train_29" )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
import faiss
UpperCamelCase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name="vecs", metric_type=faiss.METRIC_INNER_PRODUCT, )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
dset.save_faiss_index("vecs", tmp_file.name )
dset.load_faiss_index("vecs2", tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ , UpperCamelCase__ = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.floataa ) )
self.assertEqual(examples["filename"][0], "my_name-train_29" )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1, 1 ), index_name="vecs" )
dset.drop_index("vecs" )
self.assertRaises(a_, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.floataa ) ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
UpperCamelCase__ = self._create_dummy_dataset()
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase__ = {"acknowledged": True}
mocked_bulk.return_value([(True, None)] * 30 )
UpperCamelCase__ = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
UpperCamelCase__ = Elasticsearch()
dset.add_elasticsearch_index("filename", es_client=a_ )
UpperCamelCase__ , UpperCamelCase__ = dset.get_nearest_examples("filename", "my_name-train_29" )
self.assertEqual(examples["filename"][0], "my_name-train_29" )
@require_faiss
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : Tuple ):
"""simple docstring"""
import faiss
UpperCamelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal, 5 )
index.add_vectors(np.zeros((5, 5), dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal, 10 )
# single query
UpperCamelCase__ = np.zeros(5, dtype=np.floataa )
UpperCamelCase__ = 1
UpperCamelCase__ , UpperCamelCase__ = index.search(a_ )
self.assertRaises(a_, index.search, query.reshape(-1, 1 ) )
self.assertGreater(scores[0], 0 )
self.assertEqual(indices[0], 1 )
# batched queries
UpperCamelCase__ = np.eye(5, dtype=np.floataa )[::-1]
UpperCamelCase__ , UpperCamelCase__ = index.search_batch(a_ )
self.assertRaises(a_, index.search_batch, queries[0] )
UpperCamelCase__ = [scores[0] for scores in total_scores]
UpperCamelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ), 0 )
self.assertListEqual([4, 3, 2, 1, 0], a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
import faiss
UpperCamelCase__ = FaissIndex(string_factory="Flat" )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexFlat )
UpperCamelCase__ = FaissIndex(string_factory="LSH" )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexLSH )
with self.assertRaises(a_ ):
UpperCamelCase__ = FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5 ) )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
import faiss
UpperCamelCase__ = faiss.IndexFlat(5 )
UpperCamelCase__ = FaissIndex(custom_index=a_ )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index, faiss.IndexFlat )
def lowercase_ ( self : int ):
"""simple docstring"""
import faiss
UpperCamelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5, dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=a_ ) as tmp_file:
index.save(tmp_file.name )
UpperCamelCase__ = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
UpperCamelCase__ = np.zeros(5, dtype=np.floataa )
UpperCamelCase__ = 1
UpperCamelCase__ , UpperCamelCase__ = index.search(a_ )
self.assertGreater(scores[0], 0 )
self.assertEqual(indices[0], 1 )
@require_faiss
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Optional[Any]:
'''simple docstring'''
import faiss
UpperCamelCase__ = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
UpperCamelCase__ = "index.faiss"
UpperCamelCase__ = F'mock://{index_name}'
index.save(_UpperCamelCase , storage_options=mockfs.storage_options )
UpperCamelCase__ = FaissIndex.load(_UpperCamelCase , storage_options=mockfs.storage_options )
UpperCamelCase__ = np.zeros(5 , dtype=np.floataa )
UpperCamelCase__ = 1
UpperCamelCase__ , UpperCamelCase__ = index.search(_UpperCamelCase )
assert scores[0] > 0
assert indices[0] == 1
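# The test above doubles as a usage sketch: FaissIndex.save() and .load()
# accept a plain path or an fsspec URL plus storage_options, so the same
# round-trip works on local disk, the mock:// fixture here, or (assuming the
# matching fsspec backend is installed) remote stores such as s3://.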
@require_elasticsearch
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch("elasticsearch.Elasticsearch.search" ) as mocked_search, patch(
"elasticsearch.client.IndicesClient.create" ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk" ) as mocked_bulk:
UpperCamelCase__ = Elasticsearch()
UpperCamelCase__ = {"acknowledged": True}
UpperCamelCase__ = ElasticSearchIndex(es_client=a_ )
            mocked_bulk.return_value = [(True, None)] * 3  # assign the stubbed bulk response; calling return_value is a no-op on a MagicMock
index.add_documents(["foo", "bar", "foobar"] )
# single query
UpperCamelCase__ = "foo"
UpperCamelCase__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase__ , UpperCamelCase__ = index.search(a_ )
self.assertEqual(scores[0], 1 )
self.assertEqual(indices[0], 0 )
# single query with timeout
UpperCamelCase__ = "foo"
UpperCamelCase__ = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
UpperCamelCase__ , UpperCamelCase__ = index.search(a_, request_timeout=30 )
self.assertEqual(scores[0], 1 )
self.assertEqual(indices[0], 0 )
# batched queries
UpperCamelCase__ = ["foo", "bar", "foobar"]
UpperCamelCase__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase__ , UpperCamelCase__ = index.search_batch(a_ )
UpperCamelCase__ = [scores[0] for scores in total_scores]
UpperCamelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ), 0 )
self.assertListEqual([1, 1, 1], a_ )
# batched queries with timeout
UpperCamelCase__ = ["foo", "bar", "foobar"]
UpperCamelCase__ = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
UpperCamelCase__ , UpperCamelCase__ = index.search_batch(a_, request_timeout=30 )
UpperCamelCase__ = [scores[0] for scores in total_scores]
UpperCamelCase__ = [indices[0] for indices in total_indices]
self.assertGreater(np.min(a_ ), 0 )
self.assertListEqual([1, 1, 1], a_ )
| 31
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 1
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> str:
'''simple docstring'''
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
UpperCamelCase__ = quote(_UpperCamelCase )
return hfh.hf_hub_url(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" , revision=_UpperCamelCase )
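# A minimal illustration (repo and file names assumed, not from the source):
# hf_hub_url("user/my_dataset", "data/train file.json", revision="main")
# first percent-encodes the path to "data/train%20file.json" on
# huggingface_hub releases older than 0.11.0, then delegates to
# hfh.hf_hub_url(..., repo_type="dataset", revision="main").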
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
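# The loop above assumes v[l+1:r+1] is sorted and keeps the invariant
# "v[l] < key <= v[r]" (with l as an exclusive sentinel), so it returns the
# leftmost index whose value is >= key -- exactly the tail slot an
# O(n log n) longest-increasing-subsequence routine overwrites.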
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
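# A quick worked example: for v = [2, 5, 3, 7, 11, 8, 10, 13, 6] the tail
# array keeps the smallest possible tail of an increasing subsequence of each
# length, and the function returns 6 (one witness is [2, 3, 7, 8, 10, 13]).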
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
__lowercase: str = "#"
class UpperCAmelCase :
def __init__( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = {}
def lowercase_ ( self : Tuple, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in text:
if char not in trie:
UpperCamelCase__ = {}
UpperCamelCase__ = trie[char]
UpperCamelCase__ = True
def lowercase_ ( self : Optional[Any], a_ : str ):
"""simple docstring"""
UpperCamelCase__ = self._trie
for char in prefix:
if char in trie:
UpperCamelCase__ = trie[char]
else:
return []
return self._elements(a_ )
def lowercase_ ( self : int, a_ : dict ):
"""simple docstring"""
UpperCamelCase__ = []
for c, v in d.items():
UpperCamelCase__ = [" "] if c == END else [(c + s) for s in self._elements(a_ )]
result.extend(a_ )
return tuple(a_ )
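# Note: END ("#") marks the end of a complete word, and _elements maps it to
# a single space, which is why every suggestion below carries a trailing
# space.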
__lowercase: Tuple = Trie()
__lowercase: Any = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
trie.insert_word(word)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> tuple:
'''simple docstring'''
UpperCamelCase__ = trie.find_word(_UpperCamelCase )
return tuple(string + word for word in suffixes )
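# With the word list above, autocomplete_using_trie("de") yields
# ('depart ', 'detergent ', 'deer ', 'deal ') -- the words sharing the "de"
# prefix, in trie insertion order.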
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
print(autocomplete_using_trie("de" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase: Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__lowercase: Tuple = 256_047
__lowercase: Union[str, Any] = 256_145
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : List[str] = NllbTokenizer
_lowerCamelCase : str = NllbTokenizerFast
_lowerCamelCase : Dict = True
_lowerCamelCase : Optional[int] = True
_lowerCamelCase : int = {}
def lowercase_ ( self : List[str] ):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
UpperCamelCase__ = NllbTokenizer(a_, keep_accents=a_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = NllbTokenizer(a_, keep_accents=a_ )
UpperCamelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(a_, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
UpperCamelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
UpperCamelCase__ = tokenizer.convert_tokens_to_ids(a_ )
self.assertListEqual(
a_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
UpperCamelCase__ = tokenizer.convert_ids_to_tokens(a_ )
self.assertListEqual(
a_, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(a_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(a_ )
                # Checks it saves the same files, plus the tokenizer.json file for the fast tokenizer
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
UpperCamelCase__ = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
self.assertSequenceEqual(a_, a_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(a_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(a_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a_, a_ ) )
shutil.rmtree(a_ )
# Save tokenizer rust, legacy_format=True
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(a_, legacy_format=a_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(a_ )
                # Checks it saves the same files
self.assertSequenceEqual(a_, a_ )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(a_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(a_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a_, a_ ) )
shutil.rmtree(a_ )
# Save tokenizer rust, legacy_format=False
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = tokenizer_r.save_pretrained(a_, legacy_format=a_ )
UpperCamelCase__ = tokenizer_p.save_pretrained(a_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
UpperCamelCase__ = tokenizer_r.from_pretrained(a_ )
UpperCamelCase__ = tokenizer_p.from_pretrained(a_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(a_, a_ ) )
shutil.rmtree(a_ )
@require_torch
def lowercase_ ( self : str ):
"""simple docstring"""
if not self.test_seqaseq:
return
UpperCamelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Longer text that will definitely require truncation.
UpperCamelCase__ = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
" Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
UpperCamelCase__ = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
try:
UpperCamelCase__ = tokenizer.prepare_seqaseq_batch(
src_texts=a_, tgt_texts=a_, max_length=3, max_target_length=10, return_tensors="pt", src_lang="eng_Latn", tgt_lang="ron_Latn", )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 10 )
# max_target_length will default to max_length if not specified
UpperCamelCase__ = tokenizer.prepare_seqaseq_batch(
a_, tgt_texts=a_, max_length=3, return_tensors="pt" )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.labels.shape[1], 3 )
UpperCamelCase__ = tokenizer.prepare_seqaseq_batch(
src_texts=a_, max_length=3, max_target_length=10, return_tensors="pt" )
self.assertEqual(batch_encoder_only.input_ids.shape[1], 3 )
self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3 )
self.assertNotIn("decoder_input_ids", a_ )
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece." )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
pass
def lowercase_ ( self : int ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = [AddedToken("<special>", lstrip=a_ )]
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, **a_ )
UpperCamelCase__ = tokenizer_r.encode("Hey this is a <special> token" )
UpperCamelCase__ = tokenizer_r.encode("<special>", add_special_tokens=a_ )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, **a_, )
UpperCamelCase__ = self.tokenizer_class.from_pretrained(
a_, additional_special_tokens=a_, **a_ )
UpperCamelCase__ = tokenizer_p.encode("Hey this is a <special> token" )
UpperCamelCase__ = tokenizer_cr.encode("Hey this is a <special> token" )
self.assertEqual(a_, a_ )
self.assertEqual(a_, a_ )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( unittest.TestCase):
_lowerCamelCase : int = 'facebook/nllb-200-distilled-600M'
_lowerCamelCase : List[Any] = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
_lowerCamelCase : Tuple = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
_lowerCamelCase : List[Any] = [
256047,
16297,
134408,
8165,
248066,
14734,
950,
1135,
105721,
3573,
83,
27352,
108,
49486,
2,
]
@classmethod
def lowercase_ ( cls : Dict ):
"""simple docstring"""
UpperCamelCase__ = NllbTokenizer.from_pretrained(
cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn" )
UpperCamelCase__ = 1
return cls
def lowercase_ ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 25_6057 )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens, a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(a_, self.tokenizer.all_special_ids )
# fmt: off
UpperCamelCase__ = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
UpperCamelCase__ = self.tokenizer.decode(a_, skip_special_tokens=a_ )
UpperCamelCase__ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=a_ )
self.assertEqual(a_, a_ )
self.assertNotIn(self.tokenizer.eos_token, a_ )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0], a_ )
UpperCamelCase__ = 10
UpperCamelCase__ = self.tokenizer(a_, max_length=a_, truncation=a_ ).input_ids[0]
self.assertEqual(ids[-1], 2 )
self.assertEqual(ids[0], a_ )
self.assertEqual(len(a_ ), a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ), [25_6203, 3] )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
UpperCamelCase__ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(a_ )
UpperCamelCase__ = NllbTokenizer.from_pretrained(a_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids, a_ )
@require_torch
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(
self.src_text, text_target=self.tgt_text, padding=a_, truncation=a_, max_length=len(self.expected_src_tokens ), return_tensors="pt", )
UpperCamelCase__ = shift_tokens_right(
batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"] )
self.assertIsInstance(a_, a_ )
self.assertEqual((2, 15), batch.input_ids.shape )
self.assertEqual((2, 15), batch.attention_mask.shape )
UpperCamelCase__ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens, a_ )
self.assertEqual(a_, batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id] )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer(self.src_text, padding=a_, truncation=a_, max_length=3, return_tensors="pt" )
UpperCamelCase__ = self.tokenizer(
text_target=self.tgt_text, padding=a_, truncation=a_, max_length=10, return_tensors="pt" )
UpperCamelCase__ = targets["input_ids"]
UpperCamelCase__ = shift_tokens_right(
a_, self.tokenizer.pad_token_id, decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang], )
self.assertEqual(batch.input_ids.shape[1], 3 )
self.assertEqual(batch.decoder_input_ids.shape[1], 10 )
@require_torch
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.tokenizer._build_translation_inputs(
"A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn" )
self.assertEqual(
nested_simplify(a_ ), {
                # eng_Latn, A, test, EOS
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
"forced_bos_token_id": 25_6057,
}, )
@require_torch
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = True
UpperCamelCase__ = self.tokenizer(
"UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids, [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
UpperCamelCase__ = False
UpperCamelCase__ = self.tokenizer(
"UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn" )
self.assertEqual(
inputs.input_ids, [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
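# A sanity check with assumed textbook values (not from the source): water at
# room temperature has density ~998 kg/m^3 and bulk modulus ~2.15e9 Pa, so
# (2.15e9 / 998) ** 0.5 gives roughly 1467 m/s, close to the accepted
# ~1480 m/s for the speed of sound in water.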
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : torch.FloatTensor
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
@register_to_config
def __init__( self : str, a_ : int = 6_5536, a_ : Optional[int] = None, a_ : int = 2, a_ : int = 2, a_ : int = 0, a_ : str = "fourier", a_ : bool = True, a_ : bool = False, a_ : float = 0.0, a_ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), a_ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), a_ : Tuple[str] = "UNetMidBlock1D", a_ : str = None, a_ : Tuple[int] = (32, 32, 64), a_ : str = None, a_ : int = 8, a_ : int = 1, a_ : bool = False, ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = sample_size
# time
if time_embedding_type == "fourier":
UpperCamelCase__ = GaussianFourierProjection(
embedding_size=8, set_W_to_weight=a_, log=a_, flip_sin_to_cos=a_ )
UpperCamelCase__ = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
UpperCamelCase__ = Timesteps(
block_out_channels[0], flip_sin_to_cos=a_, downscale_freq_shift=a_ )
UpperCamelCase__ = block_out_channels[0]
if use_timestep_embedding:
UpperCamelCase__ = block_out_channels[0] * 4
UpperCamelCase__ = TimestepEmbedding(
in_channels=a_, time_embed_dim=a_, act_fn=a_, out_dim=block_out_channels[0], )
UpperCamelCase__ = nn.ModuleList([] )
UpperCamelCase__ = None
UpperCamelCase__ = nn.ModuleList([] )
UpperCamelCase__ = None
# down
UpperCamelCase__ = in_channels
for i, down_block_type in enumerate(a_ ):
UpperCamelCase__ = output_channel
UpperCamelCase__ = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
UpperCamelCase__ = i == len(a_ ) - 1
UpperCamelCase__ = get_down_block(
a_, num_layers=a_, in_channels=a_, out_channels=a_, temb_channels=block_out_channels[0], add_downsample=not is_final_block or downsample_each_block, )
self.down_blocks.append(a_ )
# mid
UpperCamelCase__ = get_mid_block(
a_, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1], out_channels=block_out_channels[-1], embed_dim=block_out_channels[0], num_layers=a_, add_downsample=a_, )
# up
UpperCamelCase__ = list(reversed(a_ ) )
UpperCamelCase__ = reversed_block_out_channels[0]
if out_block_type is None:
UpperCamelCase__ = out_channels
else:
UpperCamelCase__ = block_out_channels[0]
for i, up_block_type in enumerate(a_ ):
UpperCamelCase__ = output_channel
UpperCamelCase__ = (
reversed_block_out_channels[i + 1] if i < len(a_ ) - 1 else final_upsample_channels
)
UpperCamelCase__ = i == len(a_ ) - 1
UpperCamelCase__ = get_up_block(
a_, num_layers=a_, in_channels=a_, out_channels=a_, temb_channels=block_out_channels[0], add_upsample=not is_final_block, )
self.up_blocks.append(a_ )
UpperCamelCase__ = output_channel
# out
UpperCamelCase__ = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32 )
UpperCamelCase__ = get_out_block(
out_block_type=a_, num_groups_out=a_, embed_dim=block_out_channels[0], out_channels=a_, act_fn=a_, fc_dim=block_out_channels[-1] // 4, )
def lowercase_ ( self : int, a_ : torch.FloatTensor, a_ : Union[torch.Tensor, float, int], a_ : bool = True, ):
"""simple docstring"""
UpperCamelCase__ = timestep
if not torch.is_tensor(a_ ):
UpperCamelCase__ = torch.tensor([timesteps], dtype=torch.long, device=sample.device )
elif torch.is_tensor(a_ ) and len(timesteps.shape ) == 0:
UpperCamelCase__ = timesteps[None].to(sample.device )
UpperCamelCase__ = self.time_proj(a_ )
if self.config.use_timestep_embedding:
UpperCamelCase__ = self.time_mlp(a_ )
else:
UpperCamelCase__ = timestep_embed[..., None]
UpperCamelCase__ = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
UpperCamelCase__ = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
UpperCamelCase__ = ()
for downsample_block in self.down_blocks:
UpperCamelCase__ , UpperCamelCase__ = downsample_block(hidden_states=a_, temb=a_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
UpperCamelCase__ = self.mid_block(a_, a_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
UpperCamelCase__ = down_block_res_samples[-1:]
UpperCamelCase__ = down_block_res_samples[:-1]
UpperCamelCase__ = upsample_block(a_, res_hidden_states_tuple=a_, temb=a_ )
# 5. post-process
if self.out_block:
UpperCamelCase__ = self.out_block(a_, a_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=a_ )
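# Rough shape sketch (values assumed for illustration): a (batch, in_channels,
# sample_size) tensor plus a timestep flows through the down blocks (collecting
# skip connections), the optional mid block, the up blocks (consuming the
# skips), and the optional out block, returning an output whose .sample keeps
# the input's temporal length.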
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
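# Example: p_series(5, 2) returns ["1", "1 / 4", "1 / 9", "1 / 16", "1 / 25"],
# the first five terms of the P-series with p = 2.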
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: Optional[Any] = logging.get_logger(__name__)
__lowercase: List[Any] = {
"alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[Any] = 'mgp-str'
def __init__( self : Optional[int], a_ : Tuple=[32, 128], a_ : str=4, a_ : int=3, a_ : Optional[int]=27, a_ : str=38, a_ : Dict=5_0257, a_ : List[Any]=3_0522, a_ : Tuple=768, a_ : Union[str, Any]=12, a_ : int=12, a_ : Optional[Any]=4.0, a_ : str=True, a_ : Union[str, Any]=False, a_ : int=1e-5, a_ : Any=0.0, a_ : Dict=0.0, a_ : List[str]=0.0, a_ : Any=False, a_ : Tuple=0.02, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = max_token_length
UpperCamelCase__ = num_character_labels
UpperCamelCase__ = num_bpe_labels
UpperCamelCase__ = num_wordpiece_labels
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = mlp_ratio
UpperCamelCase__ = distilled
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = drop_rate
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = attn_drop_rate
UpperCamelCase__ = drop_path_rate
UpperCamelCase__ = output_aa_attentions
UpperCamelCase__ = initializer_range
| 31
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
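# Flow sketch: the first method above validates that the dataset really has a
# ClassLabel column under self.label_column and returns a copy of the template
# with that ClassLabel swapped into the label schema; the property above
# returns the rename map from the dataset's columns to the canonical
# "audio"/"labels" names.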
| 31
| 1
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
__lowercase: Union[str, Any] = [
{"dataset": "wikipedia", "config_name": "20220301.de"},
{"dataset": "wikipedia", "config_name": "20220301.en"},
{"dataset": "wikipedia", "config_name": "20220301.fr"},
{"dataset": "wikipedia", "config_name": "20220301.frr"},
{"dataset": "wikipedia", "config_name": "20220301.it"},
{"dataset": "wikipedia", "config_name": "20220301.simple"},
{"dataset": "snli", "config_name": "plain_text"},
{"dataset": "eli5", "config_name": "LFQA_reddit"},
{"dataset": "wiki40b", "config_name": "en"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.compressed"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.nq.no_index"},
{"dataset": "wiki_dpr", "config_name": "psgs_w100.multiset.no_index"},
{"dataset": "natural_questions", "config_name": "default"},
]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict=True ) -> Tuple:
'''simple docstring'''
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=SCREAMING_SNAKE_CASE__))
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = None
_lowerCamelCase : Tuple = None
def lowercase_ ( self : Optional[int], a_ : Any, a_ : Optional[int] ):
"""simple docstring"""
with TemporaryDirectory() as tmp_dir:
UpperCamelCase__ = dataset_module_factory(a_, cache_dir=a_ )
UpperCamelCase__ = import_main_class(dataset_module.module_path, dataset=a_ )
UpperCamelCase__ = builder_cls(
cache_dir=a_, config_name=a_, hash=dataset_module.hash, )
UpperCamelCase__ = "/".join(
[
HF_GCP_BASE_URL,
builder_instance._relative_data_dir(with_hash=a_ ).replace(os.sep, "/" ),
config.DATASET_INFO_FILENAME,
] )
UpperCamelCase__ = cached_path(a_, cache_dir=a_ )
self.assertTrue(os.path.exists(a_ ) )
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
UpperCamelCase__ = dataset_module_factory("wikipedia" , cache_dir=_UpperCamelCase )
UpperCamelCase__ = import_main_class(dataset_module.module_path )
UpperCamelCase__ = builder_cls(
cache_dir=_UpperCamelCase , config_name="20220301.frr" , hash=dataset_module.hash , )
# use the HF cloud storage, not the original download_and_prepare that uses apache-beam
UpperCamelCase__ = None
builder_instance.download_and_prepare()
UpperCamelCase__ = builder_instance.as_dataset()
assert ds
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = dataset_module_factory("wikipedia" , cache_dir=_UpperCamelCase )
UpperCamelCase__ = import_main_class(dataset_module.module_path , dataset=_UpperCamelCase )
UpperCamelCase__ = builder_cls(
cache_dir=_UpperCamelCase , config_name="20220301.frr" , hash=dataset_module.hash , )
UpperCamelCase__ = builder_instance.as_streaming_dataset()
assert ds
assert isinstance(_UpperCamelCase , _UpperCamelCase )
assert "train" in ds
assert isinstance(ds["train"] , _UpperCamelCase )
assert next(iter(ds["train"] ) )
| 31
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
            UpperCamelCase__ = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
            UpperCamelCase__ = [np.sum(x ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 1
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
__lowercase: List[Any] = logging.get_logger(__name__)
__lowercase: Any = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
"self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
"self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "ctc_proj",
"mask_emb": "masked_spec_embed",
}
__lowercase: Tuple = [
"ctc_proj",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Dict:
'''simple docstring'''
for attribute in key.split("." ):
UpperCamelCase__ = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
UpperCamelCase__ = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
UpperCamelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = fairseq_model.state_dict()
UpperCamelCase__ = hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
UpperCamelCase__ = True
if "*" in mapped_key:
UpperCamelCase__ = name.split(_UpperCamelCase )[0].split("." )[-2]
UpperCamelCase__ = mapped_key.replace("*" , _UpperCamelCase )
if "weight_g" in name:
UpperCamelCase__ = "weight_g"
elif "weight_v" in name:
UpperCamelCase__ = "weight_v"
elif "bias" in name and "relative_attention_bias" not in name:
UpperCamelCase__ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCamelCase__ = "weight"
else:
UpperCamelCase__ = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F'Unused weights: {unused_weights}' )
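# In short: each fairseq tensor is either routed to load_conv_layer (the
# feature-extractor convolutions), or renamed via MAPPING -- with "*" replaced
# by the encoder layer index parsed from the fairseq key -- and copied by
# weight type; anything left over ends up in the "Unused weights" warning.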
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = full_name.split("conv_layers." )[-1]
UpperCamelCase__ = name.split("." )
UpperCamelCase__ = int(items[0] )
UpperCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
UpperCamelCase__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
UpperCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
UpperCamelCase__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=None ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = WavLMConfigOrig(checkpoint["cfg"] )
UpperCamelCase__ = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint["model"] )
model.eval()
if config_path is not None:
UpperCamelCase__ = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
UpperCamelCase__ = WavLMConfig()
UpperCamelCase__ = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
__lowercase: List[Any] = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
__lowercase: Tuple = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
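# In matrix form this is the (biased) within-class scatter
# S_W = (1/N) * sum_i X_i @ X_i.T, where X_i holds the mean-centered columns
# of `features` belonging to class i and N is the total sample count.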
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
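# Likewise the between-class scatter
# S_B = (1/N) * sum_i n_i * (mu_i - mu) @ (mu_i - mu).T, with n_i samples and
# mean mu_i per class and overall mean mu -- the numerator of the Fisher
# criterion that the LDA routine below maximizes.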
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
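# A minimal usage sketch of the two reductions above, written against the
# de-obfuscated names `principal_component_analysis` and
# `linear_discriminant_analysis` that the tests reference (under the obfuscated
# defs in this file the demo is illustrative only). The dataset is
# hypothetical: two well-separated classes, two features (rows), six samples
# (columns). LDA asserts classes > dimensions, so two classes allow at most a
# 1-D projection.
def demo_projections() -> None:
    features = np.array([[1.0, 2.0, 0.0, 5.0, 6.0, 4.0], [0.0, 1.0, 1.0, 4.0, 5.0, 5.0]])
    labels = np.array([0, 0, 0, 1, 1, 1])
    pca_projection = principal_component_analysis(features, 1)
    lda_projection = linear_discriminant_analysis(features, labels, 2, 1)
    logging.info("PCA projection: %s", pca_projection)
    logging.info("LDA projection: %s", lda_projection)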
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
__lowercase: str = TypeVar("T")
__lowercase: List[Any] = TypeVar("U")
class UpperCAmelCase ( Generic[T, U]):
def __init__( self : Optional[Any], a_ : T | None, a_ : U | None ):
"""simple docstring"""
UpperCamelCase__ = key
UpperCamelCase__ = val
UpperCamelCase__ = None
UpperCamelCase__ = None
def __repr__( self : List[Any] ):
"""simple docstring"""
return (
f'Node: key: {self.key}, val: {self.val}, '
f'has next: {bool(self.next )}, has prev: {bool(self.prev )}'
)
class UpperCAmelCase ( Generic[T, U]):
def __init__( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = DoubleLinkedListNode(a_, a_ )
UpperCamelCase__ = DoubleLinkedListNode(a_, a_ )
UpperCamelCase__ , UpperCamelCase__ = self.rear, self.head
def __repr__( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = ["DoubleLinkedList"]
UpperCamelCase__ = self.head
while node.next is not None:
rep.append(str(a_ ) )
UpperCamelCase__ = node.next
rep.append(str(self.rear ) )
return ",\n ".join(a_ )
def lowercase_ ( self : Optional[Any], a_ : DoubleLinkedListNode[T, U] ):
"""simple docstring"""
UpperCamelCase__ = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
UpperCamelCase__ = node
UpperCamelCase__ = previous
UpperCamelCase__ = node
UpperCamelCase__ = self.rear
def lowercase_ ( self : Dict, a_ : DoubleLinkedListNode[T, U] ):
"""simple docstring"""
if node.prev is None or node.next is None:
return None
UpperCamelCase__ = node.next
UpperCamelCase__ = node.prev
UpperCamelCase__ = None
UpperCamelCase__ = None
return node
class UpperCAmelCase ( Generic[T, U]):
_lowerCamelCase : dict[Callable[[T], U], LRUCache[T, U]] = {}
def __init__( self : List[str], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = DoubleLinkedList()
UpperCamelCase__ = capacity
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = 0
UpperCamelCase__ = {}
def __repr__( self : Optional[int] ):
"""simple docstring"""
return (
f'CacheInfo(hits={self.hits}, misses={self.miss}, '
f'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self : Tuple, a_ : T ):
"""simple docstring"""
return key in self.cache
def lowercase_ ( self : Optional[Any], a_ : T ):
"""simple docstring"""
if key in self.cache:
self.hits += 1
UpperCamelCase__ = self.cache[key]
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(a_ )
return node.val
self.miss += 1
return None
def lowercase_ ( self : int, a_ : T, a_ : U ):
"""simple docstring"""
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
UpperCamelCase__ = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(a_ ) is not None
                ) # node guaranteed to be in the list
del self.cache[first_node.key]
self.num_keys -= 1
UpperCamelCase__ = DoubleLinkedListNode(a_, a_ )
self.list.add(self.cache[key] )
self.num_keys += 1
else:
# bump node to the end of the list, update value
UpperCamelCase__ = self.list.remove(self.cache[key] )
assert node is not None # node guaranteed to be in list
UpperCamelCase__ = value
self.list.add(a_ )
@classmethod
def lowercase_ ( cls : List[Any], a_ : int = 128 ):
"""simple docstring"""
def cache_decorator_inner(a_ : Callable[[T], U] ) -> Callable[..., U]:
def cache_decorator_wrapper(*a_ : T ) -> U:
if func not in cls.decorator_function_to_instance_map:
UpperCamelCase__ = LRUCache(a_ )
UpperCamelCase__ = cls.decorator_function_to_instance_map[func].get(args[0] )
if result is None:
UpperCamelCase__ = func(*a_ )
cls.decorator_function_to_instance_map[func].put(args[0], a_ )
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(a_, "cache_info", a_ ) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
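# A hedged usage sketch of the decorator factory above. Upstream this
# classmethod is exposed as `LRUCache.decorator`; the obfuscated method names
# in this file hide that, so treat the attribute access below as an assumption,
# not a guarantee.
#
# @LRUCache.decorator(100)
# def fib(num: int) -> int:
#     if num in (1, 2):
#         return 1
#     return fib(num - 1) + fib(num - 2)
#
# fib(25)            # recursive calls hit the cache on the way back up
# fib.cache_info()   # CacheInfo(hits=..., misses=..., capacity=100, current size=...)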
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
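# A quick sketch of the encoding helpers above (assuming Pillow is installed;
# the array below is hypothetical). A uint8 array already has a valid image
# dtype, so `encode_np_array` serializes it via Pillow without downcasting:
#
#   arr = np.zeros((16, 16, 3), dtype=np.uint8)
#   encoded = encode_np_array(arr)
#   assert encoded["path"] is None and encoded["bytes"].startswith(b"\x89PNG")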
| 31
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowercase: Any = "\nHuman: <<task>>\n\nAssistant: "
__lowercase: Dict = "huggingface-tools/default-prompts"
__lowercase: Any = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple="run" ) -> str:
'''simple docstring'''
if prompt_or_repo_id is None:
UpperCamelCase__ = DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search("\\s" , _UpperCamelCase ) is not None:
return prompt_or_repo_id
UpperCamelCase__ = cached_file(
_UpperCamelCase , PROMPT_FILES[mode] , repo_type="dataset" , user_agent={"agent": agent_name} )
with open(_UpperCamelCase , "r" , encoding="utf-8" ) as f:
return f.read()
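# A hedged usage sketch (`download_prompt` is the upstream name of the function
# above; the agent name is a placeholder and the `mode` keyword follows the
# upstream signature). A string containing whitespace is treated as a literal
# prompt and returned unchanged, while `None` or a bare repo id triggers a Hub
# download of the corresponding template:
#
#   download_prompt("Translate <<task>> into French", "my-agent")   # returned as-is
#   download_prompt(None, "my-agent", mode="run")                   # fetches run_prompt_template.txt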
| 31
|
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Union[str, Any] = CLIPTokenizer
_lowerCamelCase : Dict = CLIPTokenizerFast
_lowerCamelCase : int = True
_lowerCamelCase : Tuple = {}
_lowerCamelCase : Tuple = False
def lowercase_ ( self : Tuple ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def lowercase_ ( self : Optional[Any], **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : str, **a_ : str ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : List[Any], a_ : Dict ):
"""simple docstring"""
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = "lower newer"
return input_text, output_text
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = ["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
UpperCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_, a_ )
UpperCamelCase__ = tokens + [tokenizer.unk_token]
UpperCamelCase__ = [10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ), a_ )
@require_ftfy
def lowercase_ ( self : Dict ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = self.tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(a_, **a_ )
UpperCamelCase__ = "A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
UpperCamelCase__ = "xa\u0303y" + " " + "x\xe3y"
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of space type
UpperCamelCase__ = [
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
# Test that the tokenization is identical on unicode of line break type
UpperCamelCase__ = [
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
UpperCamelCase__ = tokenizer_s.tokenize(a_ )
UpperCamelCase__ = tokenizer_r.tokenize(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
UpperCamelCase__ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
UpperCamelCase__ = f'{text_of_1_token} {text_of_1_token}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )), )
UpperCamelCase__ = f' {text}'
UpperCamelCase__ = self.rust_tokenizer_class.from_pretrained(
a_, use_fast=a_, )
UpperCamelCase__ = tokenizer_r(a_, return_offsets_mapping=a_, add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1], (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )), )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
with self.assertRaises(a_ ) as context:
self.rust_tokenizer_class.from_pretrained("robot-test/old-clip-tokenizer" )
self.assertTrue(
context.exception.args[0].startswith(
"The `backend_tokenizer` provided does not match the expected format." ) )
@require_ftfy
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
super().test_tokenization_python_rust_equals()
def lowercase_ ( self : List[str] ):
"""simple docstring"""
pass
| 31
| 1
|
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for i in range(0 , _UpperCamelCase ):
for _ in range(0 , n - i - 1 ): # printing spaces
print(" " , end="" )
for _ in range(0 , i + 1 ): # printing stars
print("* " , end="" )
print()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] ) -> Optional[int]:
'''simple docstring'''
for i in range(_UpperCamelCase , 0 , -1 ):
for _ in range(_UpperCamelCase , 0 , -1 ): # printing stars
print("* " , end="" )
print()
for _ in range(n - i + 1 , 0 , -1 ): # printing spaces
print(" " , end="" )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> int:
'''simple docstring'''
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(_UpperCamelCase ) # upper half
reverse_floyd(_UpperCamelCase ) # lower half
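# For example, pretty_print(3) prints the full diamond (trailing spaces omitted):
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *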
if __name__ == "__main__":
print(r"| /\ | |- | |- |--| |\ /| |-")
print(r"|/ \| |- |_ |_ |__| | \/ | |_")
__lowercase: Dict = 1
while K:
        __lowercase: List[Any] = int(input("enter the number and see the magic : "))
print()
pretty_print(user_number)
__lowercase: Optional[int] = int(input("press 0 to exit... and 1 to continue..."))
print("Good Bye...")
| 31
|
'''simple docstring'''
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowercase: Any = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.31.0")
require_version("datasets>=1.14.0", "To fix: pip install -r examples/pytorch/audio-classification/requirements.txt")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : float , _UpperCamelCase : int = 1_60_00 ) -> str:
'''simple docstring'''
UpperCamelCase__ = int(round(sample_rate * max_length ) )
if len(_UpperCamelCase ) <= sample_length:
return wav
UpperCamelCase__ = randint(0 , len(_UpperCamelCase ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
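# A quick sketch of the cropping above (the keyword names follow the call site
# further down; the waveform is hypothetical): a 3-second clip at 16 kHz is
# randomly cropped to 1 second.
#
#   wav = np.random.randn(3 * 16_000)
#   clip = random_subsample(wav, max_length=1.0, sample_rate=16_000)
#   assert len(clip) == 16_000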
@dataclass
class UpperCAmelCase :
_lowerCamelCase : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name of a dataset from the datasets package'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the training audio paths and labels.'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'A file containing the validation audio paths and labels.'})
_lowerCamelCase : str = field(
default='train' , metadata={
'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\''
} , )
_lowerCamelCase : str = field(
default='validation' , metadata={
'help': (
            'The name of the evaluation data set split to use (via the datasets library). Defaults to \'validation\''
)
} , )
_lowerCamelCase : str = field(
default='audio' , metadata={'help': 'The name of the dataset column containing the audio data. Defaults to \'audio\''} , )
_lowerCamelCase : str = field(
default='label' , metadata={'help': 'The name of the dataset column containing the labels. Defaults to \'label\''})
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_lowerCamelCase : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
_lowerCamelCase : float = field(
default=20 , metadata={'help': 'Audio clips will be randomly cut to this length during training if the value is set.'} , )
@dataclass
class UpperCAmelCase :
_lowerCamelCase : str = field(
default='facebook/wav2vec2-base' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'})
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from the Hub'})
_lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_lowerCamelCase : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Name or path of preprocessor config.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature encoder layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to generate an attention mask in the feature extractor.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_lowerCamelCase : Optional[bool] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'})
_lowerCamelCase : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def lowercase_ ( self : int ):
"""simple docstring"""
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
"The argument `--freeze_feature_extractor` is deprecated and "
"will be removed in a future version. Use `--freeze_feature_encoder`"
"instead. Setting `freeze_feature_encoder==True`.", a_, )
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
"The argument `--freeze_feature_extractor` is deprecated and "
"should not be used in combination with `--freeze_feature_encoder`."
"Only make use of `--freeze_feature_encoder`." )
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_audio_classification" , _UpperCamelCase , _UpperCamelCase )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCamelCase__ = training_args.get_process_log_level()
logger.setLevel(_UpperCamelCase )
transformers.utils.logging.set_verbosity(_UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
UpperCamelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCamelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
"Use --overwrite_output_dir to train from scratch." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset and prepare it for the audio classification task.
UpperCamelCase__ = DatasetDict()
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--audio_column_name` to the correct audio column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
"Make sure to set `--label_column_name` to the correct text column - one of "
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
UpperCamelCase__ = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
UpperCamelCase__ = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
UpperCamelCase__ = feature_extractor.model_input_names[0]
def train_transforms(_UpperCamelCase : Any ):
UpperCamelCase__ = []
for audio in batch[data_args.audio_column_name]:
UpperCamelCase__ = random_subsample(
audio["array"] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(_UpperCamelCase )
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(_UpperCamelCase : List[Any] ):
UpperCamelCase__ = [audio["array"] for audio in batch[data_args.audio_column_name]]
UpperCamelCase__ = feature_extractor(_UpperCamelCase , sampling_rate=feature_extractor.sampling_rate )
UpperCamelCase__ = {model_input_name: inputs.get(_UpperCamelCase )}
UpperCamelCase__ = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
UpperCamelCase__ = raw_datasets["train"].features[data_args.label_column_name].names
UpperCamelCase__ , UpperCamelCase__ = {}, {}
for i, label in enumerate(_UpperCamelCase ):
UpperCamelCase__ = str(_UpperCamelCase )
UpperCamelCase__ = label
# Load the accuracy metric from the datasets package
UpperCamelCase__ = evaluate.load("accuracy" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(_UpperCamelCase : Any ):
UpperCamelCase__ = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=_UpperCamelCase , references=eval_pred.label_ids )
UpperCamelCase__ = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(_UpperCamelCase ) , labelaid=_UpperCamelCase , idalabel=_UpperCamelCase , finetuning_task="audio-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCamelCase__ = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
UpperCamelCase__ = (
raw_datasets["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
UpperCamelCase__ = (
raw_datasets["eval"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(_UpperCamelCase , output_all_columns=_UpperCamelCase )
# Initialize our trainer
UpperCamelCase__ = Trainer(
model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=raw_datasets["train"] if training_args.do_train else None , eval_dataset=raw_datasets["eval"] if training_args.do_eval else None , compute_metrics=_UpperCamelCase , tokenizer=_UpperCamelCase , )
# Training
if training_args.do_train:
UpperCamelCase__ = None
if training_args.resume_from_checkpoint is not None:
UpperCamelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCamelCase__ = last_checkpoint
UpperCamelCase__ = trainer.train(resume_from_checkpoint=_UpperCamelCase )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
UpperCamelCase__ = trainer.evaluate()
trainer.log_metrics("eval" , _UpperCamelCase )
trainer.save_metrics("eval" , _UpperCamelCase )
# Write model card and (optionally) push to hub
UpperCamelCase__ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "audio-classification",
"dataset": data_args.dataset_name,
"tags": ["audio-classification"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_UpperCamelCase )
else:
trainer.create_model_card(**_UpperCamelCase )
if __name__ == "__main__":
main()
| 31
| 1
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowercase: List[str] = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : str = UNetaDModel
_lowerCamelCase : List[str] = 'sample'
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCamelCase__ = torch.tensor([10] ).to(a_ )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : int ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (3, 32, 32)
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Optional[int] = UNetaDModel
_lowerCamelCase : List[Any] = 'sample'
@property
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = 4
UpperCamelCase__ = 4
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCamelCase__ = torch.tensor([10] ).to(a_ )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
return (4, 32, 32)
@property
def lowercase_ ( self : int ):
"""simple docstring"""
return (4, 32, 32)
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(a_ )
UpperCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU" )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
model.to(a_ )
UpperCamelCase__ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU" )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=a_ )
model_accelerate.to(a_ )
model_accelerate.eval()
UpperCamelCase__ = torch.randn(
1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0 ), )
UpperCamelCase__ = noise.to(a_ )
UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(a_ )
UpperCamelCase__ = model_accelerate(a_, a_ )["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update", output_loading_info=a_, low_cpu_mem_usage=a_ )
model_normal_load.to(a_ )
model_normal_load.eval()
UpperCamelCase__ = model_normal_load(a_, a_ )["sample"]
assert torch_all_close(a_, a_, rtol=1e-3 )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(a_ )
UpperCamelCase__ = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
UpperCamelCase__ = noise.to(a_ )
UpperCamelCase__ = torch.tensor([10] * noise.shape[0] ).to(a_ )
with torch.no_grad():
UpperCamelCase__ = model(a_, a_ ).sample
UpperCamelCase__ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(a_, a_, rtol=1e-3 ) )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : List[Any] = UNetaDModel
_lowerCamelCase : List[str] = 'sample'
@property
def lowercase_ ( self : Dict, a_ : Dict=(32, 32) ):
"""simple docstring"""
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(a_ )
UpperCamelCase__ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=a_ )
return {"sample": noise, "timestep": time_step}
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return (3, 32, 32)
@property
def lowercase_ ( self : int ):
"""simple docstring"""
return (3, 32, 32)
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1e-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
UpperCamelCase__ = self.dummy_input
return init_dict, inputs_dict
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=a_ )
self.assertIsNotNone(a_ )
self.assertEqual(len(loading_info["missing_keys"] ), 0 )
model.to(a_ )
UpperCamelCase__ = self.dummy_input
UpperCamelCase__ = floats_tensor((4, 3) + (256, 256) ).to(a_ )
UpperCamelCase__ = noise
UpperCamelCase__ = model(**a_ )
assert image is not None, "Make sure output is not None"
@slow
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(a_ )
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (256, 256)
UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
UpperCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(a_ )
with torch.no_grad():
UpperCamelCase__ = model(a_, a_ ).sample
UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(a_, a_, rtol=1e-2 ) )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(a_ )
UpperCamelCase__ = 4
UpperCamelCase__ = 3
UpperCamelCase__ = (32, 32)
UpperCamelCase__ = torch.ones((batch_size, num_channels) + sizes ).to(a_ )
UpperCamelCase__ = torch.tensor(batch_size * [1e-4] ).to(a_ )
with torch.no_grad():
UpperCamelCase__ = model(a_, a_ ).sample
UpperCamelCase__ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(a_, a_, rtol=1e-2 ) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
pass
| 31
|
'''simple docstring'''
import math
import sys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = ""
try:
with open(_UpperCamelCase , "rb" ) as binary_file:
UpperCamelCase__ = binary_file.read()
for dat in data:
UpperCamelCase__ = F'{dat:08b}'
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = {"0": "0", "1": "1"}
UpperCamelCase__ , UpperCamelCase__ = "", ""
UpperCamelCase__ = len(_UpperCamelCase )
for i in range(len(_UpperCamelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
UpperCamelCase__ = lexicon[curr_string]
result += last_match_id
UpperCamelCase__ = last_match_id + "0"
if math.loga(_UpperCamelCase ).is_integer():
UpperCamelCase__ = {}
for curr_key in list(_UpperCamelCase ):
UpperCamelCase__ = lexicon.pop(_UpperCamelCase )
UpperCamelCase__ = new_lex
UpperCamelCase__ = last_match_id + "1"
index += 1
UpperCamelCase__ = ""
return result
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = 8
try:
with open(_UpperCamelCase , "wb" ) as opened_file:
UpperCamelCase__ = [
to_write[i : i + byte_length]
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array[:-1]:
opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> str:
'''simple docstring'''
UpperCamelCase__ = 0
for letter in data_bits:
if letter == "1":
break
counter += 1
UpperCamelCase__ = data_bits[counter:]
UpperCamelCase__ = data_bits[counter + 1 :]
return data_bits
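# Worked example: the compressed stream starts with a run of "0"s terminated by
# a single "1" marker. For "0001" + payload, the loop stops with counter == 3,
# and data_bits[counter + 1 :] drops the four prefix bits, returning the payload
# unchanged.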
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str ) -> None:
'''simple docstring'''
UpperCamelCase__ = read_file_binary(_UpperCamelCase )
UpperCamelCase__ = remove_prefix(_UpperCamelCase )
UpperCamelCase__ = decompress_data(_UpperCamelCase )
write_file_binary(_UpperCamelCase , _UpperCamelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 31
| 1
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any , _UpperCamelCase : Any ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
for part_id in partition_order:
UpperCamelCase__ = df.where(F'SPARK_PARTITION_ID() = {part_id}' ).collect()
for row_idx, row in enumerate(_UpperCamelCase ):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(1_00 ).repartition(1 )
UpperCamelCase__ = Spark(_UpperCamelCase )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(10 ).repartition(2 )
UpperCamelCase__ = [1, 0]
UpperCamelCase__ = _generate_iterable_examples(_UpperCamelCase , _UpperCamelCase ) # Reverse the partitions.
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , _UpperCamelCase )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> str:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(10 ).repartition(1 )
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
        UpperCamelCase__ = lambda _UpperCamelCase : _UpperCamelCase.reverse()
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [2, 1, 0] )
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shuffle_data_sources(_UpperCamelCase )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [0, 2] )
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase__ = SparkExamplesIterable(_UpperCamelCase ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
UpperCamelCase__ = _get_expected_row_ids_and_row_dicts_for_partition_order(_UpperCamelCase , [1, 3] )
for i, (row_id, row_dict) in enumerate(_UpperCamelCase ):
UpperCamelCase__ , UpperCamelCase__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE__( ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase__ = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
UpperCamelCase__ = spark.range(1_00 ).repartition(1 )
UpperCamelCase__ = Spark(_UpperCamelCase )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 31
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Any, a_ : VQModel, a_ : UNetaDModel, a_ : DDIMScheduler ):
"""simple docstring"""
super().__init__()
self.register_modules(vqvae=a_, unet=a_, scheduler=a_ )
@torch.no_grad()
def __call__( self : Union[str, Any], a_ : int = 1, a_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None, a_ : float = 0.0, a_ : int = 50, a_ : Optional[str] = "pil", a_ : bool = True, **a_ : Tuple, ):
"""simple docstring"""
UpperCamelCase__ = randn_tensor(
(batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=a_, )
UpperCamelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
UpperCamelCase__ = latents * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(a_ )
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
UpperCamelCase__ = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
UpperCamelCase__ = {}
if accepts_eta:
UpperCamelCase__ = eta
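        # (eta only has an effect for DDIM-style schedulers; passing it unconditionally
        # would raise a TypeError on schedulers whose step() does not accept it)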
for t in self.progress_bar(self.scheduler.timesteps ):
UpperCamelCase__ = self.scheduler.scale_model_input(a_, a_ )
# predict the noise residual
UpperCamelCase__ = self.unet(a_, a_ ).sample
# compute the previous noisy sample x_t -> x_t-1
UpperCamelCase__ = self.scheduler.step(a_, a_, a_, **a_ ).prev_sample
        # decode the image latents with the VQ-VAE
UpperCamelCase__ = self.vqvae.decode(a_ ).sample
UpperCamelCase__ = (image / 2 + 0.5).clamp(0, 1 )
UpperCamelCase__ = image.cpu().permute(0, 2, 3, 1 ).numpy()
if output_type == "pil":
UpperCamelCase__ = self.numpy_to_pil(a_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a_ )
| 31
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int = 10_00 ) -> int:
'''simple docstring'''
UpperCamelCase__ = -1
UpperCamelCase__ = 0
for a in range(1 , n // 3 ):
        # Solving the two equations a**2 + b**2 = c**2 and a + b + c = n, eliminating c
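        # Derivation: c = n - a - b, so a**2 + b**2 = (n - a - b)**2 expands to
        # 0 = n**2 - 2*a*n - 2*b*n + 2*a*b, i.e. b * (2*n - 2*a) = n**2 - 2*a*n,
        # which gives the closed form for b used below.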
UpperCamelCase__ = (n * n - 2 * a * n) // (2 * n - 2 * a)
UpperCamelCase__ = n - a - b
if c * c == (a * a + b * b):
UpperCamelCase__ = a * b * c
if candidate >= product:
UpperCamelCase__ = candidate
return product
if __name__ == "__main__":
print(F"""{solution() = }""")
| 31
|
'''simple docstring'''
import argparse
import json
import subprocess
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = []
UpperCamelCase__ = (
F'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
" https://api.github.com/repos/huggingface/transformers/actions/runners"
)
UpperCamelCase__ = subprocess.run(_UpperCamelCase , shell=_UpperCamelCase , stdout=subprocess.PIPE )
UpperCamelCase__ = output.stdout.decode("utf-8" )
UpperCamelCase__ = json.loads(_UpperCamelCase )
UpperCamelCase__ = status["runners"]
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_UpperCamelCase )
# save the result so we can report them on Slack
with open("offline_runners.txt" , "w" ) as fp:
fp.write(json.dumps(_UpperCamelCase ) )
if len(_UpperCamelCase ) > 0:
UpperCamelCase__ = "\n".join([x["name"] for x in offline_runners] )
raise ValueError(F'The following runners are offline:\n{failed}' )
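# Illustrative invocation (script path, runner names, and token are hypothetical):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token <GITHUB_TOKEN>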
if __name__ == "__main__":
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict ) -> Optional[Any]:
'''simple docstring'''
return values.split("," )
__lowercase: str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--target_runners",
default=None,
type=list_str,
required=True,
help="Comma-separated list of runners to check status.",
)
parser.add_argument(
"--token", default=None, type=str, required=True, help="A token that has actions:read permission."
)
__lowercase: str = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
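# Illustrative output (assuming the generic helpers above resolve as intended): with
# nth_term=5 and power=2 the series is ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'].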
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
__lowercase: str = logging.get_logger(__name__)
__lowercase: Tuple = TypeVar("DatasetType", Dataset, IterableDataset)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[List[float]] = None , _UpperCamelCase : Optional[int] = None , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError("Unable to interleave an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
else:
return _interleave_iterable_datasets(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , stopping_strategy=_UpperCamelCase )
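# Illustrative usage (hypothetical datasets): interleaving [ds_a, ds_b] with
# probabilities=[0.8, 0.2] samples roughly four examples from ds_a for every one from
# ds_b; "first_exhausted" stops once any source runs dry, while "all_exhausted"
# keeps cycling (oversampling) until every source has been fully seen.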
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[DatasetType] , _UpperCamelCase : Optional[DatasetInfo] = None , _UpperCamelCase : Optional[NamedSplit] = None , _UpperCamelCase : int = 0 , ) -> DatasetType:
'''simple docstring'''
if not dsets:
raise ValueError("Unable to concatenate an empty list of datasets." )
for i, dataset in enumerate(_UpperCamelCase ):
if not isinstance(_UpperCamelCase , (Dataset, IterableDataset) ):
if isinstance(_UpperCamelCase , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
"is an empty dataset dictionary." )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(_UpperCamelCase )}\n'
            F'Please pick one to concatenate with the other datasets, for example: dataset[\'{next(iter(_UpperCamelCase ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(_UpperCamelCase ).__name__}.' )
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = (
(Dataset, IterableDataset) if isinstance(_UpperCamelCase , _UpperCamelCase ) else (IterableDataset, Dataset)
)
elif not isinstance(_UpperCamelCase , _UpperCamelCase ):
raise ValueError(
            F'Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
else:
return _concatenate_iterable_datasets(_UpperCamelCase , info=_UpperCamelCase , split=_UpperCamelCase , axis=_UpperCamelCase )
| 31
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase: Tuple = logging.get_logger(__name__)
__lowercase: List[Any] = {"vocab_file": "spm_char.model"}
__lowercase: int = {
"vocab_file": {
"microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
"microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
"microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
}
}
__lowercase: Any = {
"microsoft/speecht5_asr": 1_024,
"microsoft/speecht5_tts": 1_024,
"microsoft/speecht5_vc": 1_024,
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Dict = VOCAB_FILES_NAMES
_lowerCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase : Tuple = ['input_ids', 'attention_mask']
def __init__( self : Optional[int], a_ : Tuple, a_ : str="<s>", a_ : List[str]="</s>", a_ : int="<unk>", a_ : Union[str, Any]="<pad>", a_ : Optional[Dict[str, Any]] = None, **a_ : Union[str, Any], ):
"""simple docstring"""
UpperCamelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=a_, eos_token=a_, unk_token=a_, pad_token=a_, sp_model_kwargs=self.sp_model_kwargs, **a_, )
UpperCamelCase__ = vocab_file
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a_ )
@property
def lowercase_ ( self : int ):
"""simple docstring"""
return self.sp_model.get_piece_size()
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = {self.convert_ids_to_tokens(a_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.__dict__.copy()
UpperCamelCase__ = None
return state
def __setstate__( self : Union[str, Any], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
UpperCamelCase__ = {}
UpperCamelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase_ ( self : Tuple, a_ : str ):
"""simple docstring"""
return self.sp_model.encode(a_, out_type=a_ )
def lowercase_ ( self : Union[str, Any], a_ : List[str] ):
"""simple docstring"""
return self.sp_model.piece_to_id(a_ )
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.sp_model.IdToPiece(a_ )
return token
def lowercase_ ( self : int, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = []
UpperCamelCase__ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(a_ ) + token
UpperCamelCase__ = []
else:
current_sub_tokens.append(a_ )
out_string += self.sp_model.decode(a_ )
return out_string.strip()
def lowercase_ ( self : Optional[int], a_ : Dict, a_ : Tuple=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[int], a_ : List[int], a_ : Optional[List[int]] = None, a_ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a_, token_ids_a=a_, already_has_special_tokens=a_ )
UpperCamelCase__ = [1]
if token_ids_a is None:
return ([0] * len(a_ )) + suffix_ones
return ([0] * len(a_ )) + ([0] * len(a_ )) + suffix_ones
def lowercase_ ( self : List[Any], a_ : str, a_ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(a_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase__ = os.path.join(
a_, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, a_ )
elif not os.path.isfile(self.vocab_file ):
with open(a_, "wb" ) as fi:
UpperCamelCase__ = self.sp_model.serialized_model_proto()
fi.write(a_ )
return (out_vocab_file,)
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list ) -> float:
'''simple docstring'''
UpperCamelCase__ = 0
while len(_UpperCamelCase ) > 1:
UpperCamelCase__ = 0
# Consider two files with minimum cost to be merged
for _ in range(2 ):
UpperCamelCase__ = files.index(min(_UpperCamelCase ) )
temp += files[min_index]
files.pop(_UpperCamelCase )
files.append(_UpperCamelCase )
optimal_merge_cost += temp
return optimal_merge_cost
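# Worked example (illustrative): for files [2, 3, 4] the two cheapest files are merged
# first (2 + 3 = 5), then 5 + 4 = 9, giving a total optimal merge cost of 5 + 9 = 14.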
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : bytes ) -> str:
'''simple docstring'''
return "".join([hex(_UpperCamelCase )[2:].zfill(2 ).upper() for byte in list(_UpperCamelCase )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bytes:
'''simple docstring'''
if (len(_UpperCamelCase ) % 2) != 0:
raise ValueError(
"Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(_UpperCamelCase ) <= set("0123456789ABCDEF" ):
raise ValueError(
"Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(_UpperCamelCase ) , 2 ) )
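# Illustrative round trip: encoding b"BC" yields "4243", and decoding "4243"
# recovers b"BC".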
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] ) -> None:
'''simple docstring'''
create_state_space_tree(_UpperCamelCase , [] , 0 , [0 for i in range(len(_UpperCamelCase ) )] )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int | str] , _UpperCamelCase : list[int | str] , _UpperCamelCase : int , _UpperCamelCase : list[int] , ) -> None:
'''simple docstring'''
if index == len(_UpperCamelCase ):
print(_UpperCamelCase )
return
for i in range(len(_UpperCamelCase ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
UpperCamelCase__ = True
create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 , _UpperCamelCase )
current_sequence.pop()
UpperCamelCase__ = False
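# The driver code below prints every permutation of each sample sequence
# (4! = 24 lines for [3, 1, 2, 4] and 3! = 6 lines for ["A", "B", "C"]).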
__lowercase: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
__lowercase: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
| 31
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class UpperCAmelCase ( unittest.TestCase):
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = tempfile.mkdtemp()
# fmt: off
UpperCamelCase__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase__ = dict(zip(a_, range(len(a_ ) ) ) )
UpperCamelCase__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCamelCase__ = {"unk_token": "<unk>"}
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase__ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file, "w", encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file, "w", encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
UpperCamelCase__ = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
UpperCamelCase__ = os.path.join(self.tmpdirname, a_ )
with open(self.image_processor_file, "w", encoding="utf-8" ) as fp:
json.dump(a_, a_ )
def lowercase_ ( self : Union[str, Any], **a_ : List[Any] ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Tuple, **a_ : Dict ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Optional[int], **a_ : List[Any] ):
"""simple docstring"""
return CLIPImageProcessor.from_pretrained(self.tmpdirname, **a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta )]
UpperCamelCase__ = [Image.fromarray(np.moveaxis(a_, 0, -1 ) ) for x in image_inputs]
return image_inputs
def lowercase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = self.get_rust_tokenizer()
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=a_ )
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase__ = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer, a_ )
self.assertIsInstance(processor_fast.tokenizer, a_ )
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor, a_ )
self.assertIsInstance(processor_fast.image_processor, a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase__ = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)" )
UpperCamelCase__ = self.get_image_processor(do_normalize=a_, padding_value=1.0 )
UpperCamelCase__ = CLIPProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=a_, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, a_ )
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor, a_ )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = image_processor(a_, return_tensors="np" )
UpperCamelCase__ = processor(images=a_, return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2 )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = processor(text=a_ )
UpperCamelCase__ = tokenizer(a_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(a_ ):
processor()
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase__ = processor.batch_decode(a_ )
UpperCamelCase__ = tokenizer.batch_decode(a_ )
self.assertListEqual(a_, a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.get_image_processor()
UpperCamelCase__ = self.get_tokenizer()
UpperCamelCase__ = CLIPProcessor(tokenizer=a_, image_processor=a_ )
UpperCamelCase__ = "lower newer"
UpperCamelCase__ = self.prepare_image_inputs()
UpperCamelCase__ = processor(text=a_, images=a_ )
self.assertListEqual(list(inputs.keys() ), processor.model_input_names )
| 31
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowercase: int = logging.get_logger(__name__)
__lowercase: str = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'yolos'
def __init__( self : List[str], a_ : Optional[int]=768, a_ : Optional[int]=12, a_ : Any=12, a_ : List[str]=3072, a_ : Any="gelu", a_ : int=0.0, a_ : List[Any]=0.0, a_ : Dict=0.02, a_ : Optional[int]=1e-1_2, a_ : List[Any]=[512, 864], a_ : Any=16, a_ : Any=3, a_ : Tuple=True, a_ : List[str]=100, a_ : Union[str, Any]=True, a_ : Any=False, a_ : List[str]=1, a_ : Tuple=5, a_ : Union[str, Any]=2, a_ : int=5, a_ : Union[str, Any]=2, a_ : Dict=0.1, **a_ : Dict, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = image_size
UpperCamelCase__ = patch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = qkv_bias
UpperCamelCase__ = num_detection_tokens
UpperCamelCase__ = use_mid_position_embeddings
UpperCamelCase__ = auxiliary_loss
# Hungarian matcher
UpperCamelCase__ = class_cost
UpperCamelCase__ = bbox_cost
UpperCamelCase__ = giou_cost
# Loss coefficients
UpperCamelCase__ = bbox_loss_coefficient
UpperCamelCase__ = giou_loss_coefficient
UpperCamelCase__ = eos_coefficient
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Union[str, Any] = version.parse('1.11')
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return 1e-4
@property
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
return 12
| 31
| 1
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : int ) -> float:
'''simple docstring'''
if digit_amount > 0:
return round(number - int(_UpperCamelCase ) , _UpperCamelCase )
return number - int(_UpperCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 31
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=False ) -> Union[str, Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
if not is_sharded:
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading PyTorch weights from {pt_path}' )
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
logger.info(F'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
UpperCamelCase__ = convert_pytorch_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase__ = convert_pytorch_sharded_state_dict_to_flax(_UpperCamelCase , _UpperCamelCase )
return flax_state_dict
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple[str] , _UpperCamelCase : np.ndarray , _UpperCamelCase : Dict[str, jnp.ndarray] , _UpperCamelCase : str , ) -> (Tuple[str], np.ndarray):
'''simple docstring'''
    def is_key_or_prefix_key_in_dict(key : Tuple[str] ) -> bool:
return len(set(_UpperCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase__ = pt_tuple_key[:-1] + ("scale",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase__ = pt_tuple_key[:-1] + ("mean",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase__ = pt_tuple_key[:-1] + ("var",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase__ = pt_tuple_key[:-1] + ("embedding",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_UpperCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase__ = pt_tuple_key[:-1] + ("kernel",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_UpperCamelCase ):
UpperCamelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase__ = pt_tuple_key[:-1] + ("weight",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase__ = pt_tuple_key[:-1] + ("bias",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase__ = pt_tuple_key[-2] + "_g"
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase__ = pt_tuple_key[-2] + "_v"
if name is not None:
UpperCamelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
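# Illustrative renames performed above (hypothetical keys): a layer-norm
# ("encoder", "norm", "weight") becomes ("encoder", "norm", "scale"); a 4D conv
# "weight" becomes "kernel", transposed from (out, in, kh, kw) to (kh, kw, in, out);
# and a linear "weight" becomes a transposed "kernel".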
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
    # add batch_stats keys/values to the dict
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flatten_dict(flax_model.params["batch_stats"] )
random_flax_state_dict.update(_UpperCamelCase )
UpperCamelCase__ = {}
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any:
'''simple docstring'''
import torch
# Load the index
UpperCamelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase__ = torch.load(_UpperCamelCase )
UpperCamelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase__ = flax_model.base_model_prefix
        # use the params dict if the model contains batch norm layers, then add batch_stats keys/values to it
if "batch_stats" in flax_model.params:
UpperCamelCase__ = flax_model.params["params"]
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["batch_stats"] ) )
else:
UpperCamelCase__ = flax_model.params
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split("." )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split("." )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase__ = tuple(pt_key.split("." ) )
# remove base model prefix if necessary
UpperCamelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase__ , UpperCamelCase__ = rename_key_and_reshape_tensor(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# add model prefix if necessary
UpperCamelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_UpperCamelCase , _UpperCamelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase__ = jnp.asarray(_UpperCamelCase )
return unflatten_dict(_UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ = os.path.abspath(_UpperCamelCase )
logger.info(F'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
UpperCamelCase__ = getattr(_UpperCamelCase , "Flax" + model.__class__.__name__ )
# load flax weight dict
with open(_UpperCamelCase , "rb" ) as state_f:
try:
UpperCamelCase__ = from_bytes(_UpperCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Optional[Any]:
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"
" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
" instructions." )
raise
# check if we have bf16 weights
    UpperCamelCase__ = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , _UpperCamelCase ) ).values()
if any(_UpperCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot
        # handle bf16 and bf16 is not yet fully supported in PyTorch.
logger.warning(
"Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
"before loading those in PyTorch model." )
        UpperCamelCase__ = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , _UpperCamelCase )
UpperCamelCase__ = flatten_dict(_UpperCamelCase )
UpperCamelCase__ = pt_model.state_dict()
UpperCamelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split("." )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split("." )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase__ = []
UpperCamelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase__ = ".".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_UpperCamelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = jnp.transpose(_UpperCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCamelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
UpperCamelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("weight",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_mean",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase__ = flax_key_tuple[:-1] + ("running_var",)
if "batch_stats" in flax_state:
UpperCamelCase__ = ".".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase__ = ".".join(_UpperCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase__ = key.split("." )
UpperCamelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase__ = key_components[-2] + "_g"
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase__ = key_components[-2] + "_v"
if name is not None:
UpperCamelCase__ = key_components[:-3] + [name]
UpperCamelCase__ = ".".join(_UpperCamelCase )
UpperCamelCase__ = key
if flax_key in special_pt_names:
UpperCamelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
F'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
UpperCamelCase__ = np.asarray(_UpperCamelCase ) if not isinstance(_UpperCamelCase , np.ndarray ) else flax_tensor
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# remove from missing keys
missing_keys.remove(_UpperCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_UpperCamelCase )
pt_model.load_state_dict(_UpperCamelCase )
# re-transform missing_keys to list
UpperCamelCase__ = list(_UpperCamelCase )
if len(_UpperCamelCase ) > 0:
logger.warning(
"Some weights of the Flax model were not used when initializing the PyTorch model"
F' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
F' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
F' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
" FlaxBertForSequenceClassification model)." )
else:
logger.warning(F'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_UpperCamelCase ) > 0:
logger.warning(
F'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
F' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
" use it for predictions and inference." )
else:
logger.warning(
F'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"If your task is similar to the task the model of the checkpoint was trained on, "
F'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
| 31
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowercase: Optional[Any] = logging.get_logger(__name__)
__lowercase: Dict = {
"microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json",
"microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json",
"microsoft/deberta-v2-xlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"
),
"microsoft/deberta-v2-xxlarge-mnli": (
"https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"
),
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'deberta-v2'
def __init__( self : int, a_ : Optional[Any]=12_8100, a_ : Dict=1536, a_ : List[str]=24, a_ : Optional[Any]=24, a_ : Dict=6144, a_ : Dict="gelu", a_ : Dict=0.1, a_ : Dict=0.1, a_ : List[str]=512, a_ : int=0, a_ : Optional[int]=0.02, a_ : Any=1e-7, a_ : List[str]=False, a_ : str=-1, a_ : Union[str, Any]=0, a_ : Tuple=True, a_ : Any=None, a_ : int=0, a_ : Union[str, Any]="gelu", **a_ : Tuple, ):
"""simple docstring"""
super().__init__(**a_ )
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = relative_attention
UpperCamelCase__ = max_relative_positions
UpperCamelCase__ = pad_token_id
UpperCamelCase__ = position_biased_input
# Backwards compatibility
if type(a_ ) == str:
UpperCamelCase__ = [x.strip() for x in pos_att_type.lower().split("|" )]
UpperCamelCase__ = pos_att_type
UpperCamelCase__ = vocab_size
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = kwargs.get("pooler_hidden_size", a_ )
UpperCamelCase__ = pooler_dropout
UpperCamelCase__ = pooler_hidden_act
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@property
def lowercase_ ( self : List[str] ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase__ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCamelCase__ = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def lowercase_ ( self : Dict ):
"""simple docstring"""
return 12
def lowercase_ ( self : Optional[Any], a_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"], a_ : int = -1, a_ : int = -1, a_ : int = -1, a_ : bool = False, a_ : Optional["TensorType"] = None, a_ : int = 3, a_ : int = 40, a_ : int = 40, a_ : "PreTrainedTokenizerBase" = None, ):
"""simple docstring"""
UpperCamelCase__ = super().generate_dummy_inputs(preprocessor=a_, framework=a_ )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
return credit_card_number.startswith(("34", "35", "37", "4", "5", "6") )
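# (These prefixes correspond to the major card networks: 34/37 American Express,
# 35 JCB, 4 Visa, 5 Mastercard, and 6 Discover.)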
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = credit_card_number
UpperCamelCase__ = 0
UpperCamelCase__ = len(_UpperCamelCase ) - 2
for i in range(_UpperCamelCase , -1 , -2 ):
# double the value of every second digit
UpperCamelCase__ = int(cc_number[i] )
digit *= 2
        # If doubling a digit results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
if digit > 9:
digit %= 10
digit += 1
UpperCamelCase__ = cc_number[:i] + str(_UpperCamelCase ) + cc_number[i + 1 :]
total += digit
# Sum up the remaining digits
for i in range(len(_UpperCamelCase ) - 1 , -1 , -2 ):
total += int(cc_number[i] )
return total % 10 == 0
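# Worked example (illustrative): for "4111111111111111", doubling every second digit
# from the right gives 8 + 2*7 = 22; the untouched digits add 8, so the total is 30,
# which is divisible by 10 and therefore passes the Luhn check.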
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> bool:
'''simple docstring'''
UpperCamelCase__ = F'{credit_card_number} is an invalid credit card number because'
if not credit_card_number.isdigit():
print(F'{error_message} it has nonnumerical characters.' )
return False
if not 13 <= len(_UpperCamelCase ) <= 16:
print(F'{error_message} of its length.' )
return False
if not validate_initial_digits(_UpperCamelCase ):
print(F'{error_message} of its first two digits.' )
return False
if not luhn_validation(_UpperCamelCase ):
print(F'{error_message} it fails the Luhn check.' )
return False
print(F'{credit_card_number} is a valid credit card number.' )
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("4111111111111111")
validate_credit_card_number("32323")
| 31
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowercase: Optional[int] = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Dict = ["DeiTFeatureExtractor"]
__lowercase: Union[str, Any] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Union[str, Any] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
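# At type-checking time the concrete symbols are imported eagerly below; at runtime the
# module object is replaced by a _LazyModule that resolves names from _import_structure
# on first attribute access, so heavy backends load only when actually used.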
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
__lowercase: List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
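# Illustrative check: with hole_conc = 20 and intrinsic_conc = 10 (electron_conc = 0),
# the mass-action law n * p = n_i**2 gives electron_conc = 10**2 / 20 = 5.0.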
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
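# This dummy placeholder lets the package expose names whose real implementations need
# torch and scipy; touching one without those backends installed raises an informative
# ImportError via requires_backends instead of failing at import time.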
| 31
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowercase: Dict = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase: Optional[int] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__lowercase: Any = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 31
| 1
|
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Dict, a_ : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
UpperCamelCase__ = nn.ModuleList(a_ )
def lowercase_ ( self : List[str], a_ : torch.FloatTensor, a_ : Union[torch.Tensor, float, int], a_ : torch.Tensor, a_ : List[torch.tensor], a_ : List[float], a_ : Optional[torch.Tensor] = None, a_ : Optional[torch.Tensor] = None, a_ : Optional[torch.Tensor] = None, a_ : Optional[Dict[str, Any]] = None, a_ : bool = False, a_ : bool = True, ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a_, a_, self.nets ) ):
UpperCamelCase__ , UpperCamelCase__ = controlnet(
a_, a_, a_, a_, a_, a_, a_, a_, a_, a_, a_, )
# merge samples
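            # the first controlnet seeds the residual lists; every subsequent
            # controlnet's down-block and mid-block residuals are summed in elementwise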
if i == 0:
UpperCamelCase__ , UpperCamelCase__ = down_samples, mid_sample
else:
UpperCamelCase__ = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a_, a_ )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowercase_ ( self : Optional[int], a_ : Union[str, os.PathLike], a_ : bool = True, a_ : Callable = None, a_ : bool = False, a_ : Optional[str] = None, ):
"""simple docstring"""
UpperCamelCase__ = 0
UpperCamelCase__ = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a_, is_main_process=a_, save_function=a_, safe_serialization=a_, variant=a_, )
idx += 1
UpperCamelCase__ = model_path_to_save + f'_{idx}'
@classmethod
def lowercase_ ( cls : List[Any], a_ : Optional[Union[str, os.PathLike]], **a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = 0
UpperCamelCase__ = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
UpperCamelCase__ = pretrained_model_path
while os.path.isdir(a_ ):
UpperCamelCase__ = ControlNetModel.from_pretrained(a_, **a_ )
controlnets.append(a_ )
idx += 1
UpperCamelCase__ = pretrained_model_path + f'_{idx}'
logger.info(f'{len(a_ )} controlnets loaded from {pretrained_model_path}.' )
if len(a_ ) == 0:
raise ValueError(
f'No ControlNets found under {os.path.dirname(a_ )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(a_ )
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Dict , _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Dict: # noqa: E741
'''simple docstring'''
while r - l > 1:
UpperCamelCase__ = (l + r) // 2
if v[m] >= key:
UpperCamelCase__ = m
else:
UpperCamelCase__ = m # noqa: E741
return r
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : list[int] ) -> int:
'''simple docstring'''
if len(_UpperCamelCase ) == 0:
return 0
UpperCamelCase__ = [0] * len(_UpperCamelCase )
UpperCamelCase__ = 1
UpperCamelCase__ = v[0]
for i in range(1 , len(_UpperCamelCase ) ):
if v[i] < tail[0]:
UpperCamelCase__ = v[i]
elif v[i] > tail[length - 1]:
UpperCamelCase__ = v[i]
length += 1
else:
UpperCamelCase__ = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = (CMStochasticIterativeScheduler,)
_lowerCamelCase : Tuple = 10
def lowercase_ ( self : List[str], **a_ : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**a_ )
return config
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = 10
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = self.scheduler_classes[0](**a_ )
scheduler.set_timesteps(a_ )
UpperCamelCase__ = scheduler.timesteps[0]
UpperCamelCase__ = scheduler.timesteps[1]
UpperCamelCase__ = self.dummy_sample
UpperCamelCase__ = 0.1 * sample
UpperCamelCase__ = scheduler.step(a_, a_, a_ ).prev_sample
UpperCamelCase__ = scheduler.step(a_, a_, a_ ).prev_sample
self.assertEqual(output_a.shape, sample.shape )
self.assertEqual(output_a.shape, output_a.shape )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=a_ )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = 1
scheduler.set_timesteps(a_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a_ ):
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(a_, a_ )
# 2. predict noise residual
UpperCamelCase__ = model(a_, a_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(a_, a_, a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = [106, 0]
scheduler.set_timesteps(timesteps=a_ )
UpperCamelCase__ = scheduler.timesteps
UpperCamelCase__ = torch.manual_seed(0 )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase__ = scheduler.scale_model_input(a_, a_ )
# 2. predict noise residual
UpperCamelCase__ = model(a_, a_ )
# 3. predict previous sample x_t-1
UpperCamelCase__ = scheduler.step(a_, a_, a_, generator=a_ ).prev_sample
UpperCamelCase__ = pred_prev_sample
UpperCamelCase__ = torch.sum(torch.abs(a_ ) )
UpperCamelCase__ = torch.mean(torch.abs(a_ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = [39, 30, 12, 15, 0]
with self.assertRaises(a_, msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=a_ )
def lowercase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = [39, 30, 12, 1, 0]
UpperCamelCase__ = len(a_ )
with self.assertRaises(a_, msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=a_, timesteps=a_ )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**a_ )
UpperCamelCase__ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            a_, msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}', ):
scheduler.set_timesteps(timesteps=a_ )
| 31
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase ( metaclass=SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = ['torch', 'scipy']
def __init__( self : List[str], *a_ : Optional[int], **a_ : int ):
"""simple docstring"""
requires_backends(self, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Dict, *a_ : Tuple, **a_ : Dict ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
@classmethod
def lowercase_ ( cls : Optional[Any], *a_ : List[Any], **a_ : Any ):
"""simple docstring"""
requires_backends(cls, ["torch", "scipy"] )
| 31
| 1
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , ) -> tuple:
'''simple docstring'''
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative in a semiconductor" )
elif hole_conc < 0:
raise ValueError("Hole concentration cannot be negative in a semiconductor" )
elif intrinsic_conc < 0:
raise ValueError(
"Intrinsic concentration cannot be negative in a semiconductor" )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
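# Worked example (added for clarity). The function applies the mass-action law
# n * p = n_i**2: for the inputs (electron_conc=25, hole_conc=100, intrinsic_conc=0)
# it returns ("intrinsic_conc", 50.0), since (25 * 100) ** 0.5 == 50.0.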
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase: str = logging.get_logger(__name__)
__lowercase: Optional[Any] = {
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Any = 'gpt_neox_japanese'
def __init__( self : List[str], a_ : Union[str, Any]=3_2000, a_ : str=2560, a_ : Dict=32, a_ : Tuple=32, a_ : Union[str, Any]=4, a_ : Union[str, Any]="gelu", a_ : int=1.00, a_ : Dict=1_0000, a_ : Any=2048, a_ : Optional[int]=0.02, a_ : int=1e-5, a_ : int=True, a_ : Optional[int]=3_1996, a_ : List[str]=3_1999, a_ : List[str]=0.1, a_ : Optional[int]=0.0, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(bos_token_id=a_, eos_token_id=a_, **a_ )
UpperCamelCase__ = vocab_size
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_multiple_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = rotary_pct
UpperCamelCase__ = rotary_emb_base
UpperCamelCase__ = initializer_range
UpperCamelCase__ = layer_norm_eps
UpperCamelCase__ = use_cache
UpperCamelCase__ = attention_dropout
UpperCamelCase__ = hidden_dropout
| 31
| 1
|
'''simple docstring'''
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
__lowercase: int = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = SwinConfig.from_pretrained(
"microsoft/swin-tiny-patch4-window7-224" , out_features=["stage1", "stage2", "stage3", "stage4"] )
UpperCamelCase__ = MaskFormerConfig(backbone_config=_UpperCamelCase )
UpperCamelCase__ = "huggingface/label-files"
if "ade20k-full" in model_name:
# this should be ok
UpperCamelCase__ = 8_47
UpperCamelCase__ = "maskformer-ade20k-full-id2label.json"
elif "ade" in model_name:
# this should be ok
UpperCamelCase__ = 1_50
UpperCamelCase__ = "ade20k-id2label.json"
elif "coco-stuff" in model_name:
# this should be ok
UpperCamelCase__ = 1_71
UpperCamelCase__ = "maskformer-coco-stuff-id2label.json"
elif "coco" in model_name:
# TODO
UpperCamelCase__ = 1_33
UpperCamelCase__ = "coco-panoptic-id2label.json"
elif "cityscapes" in model_name:
# this should be ok
UpperCamelCase__ = 19
UpperCamelCase__ = "cityscapes-id2label.json"
elif "vistas" in model_name:
# this should be ok
UpperCamelCase__ = 65
UpperCamelCase__ = "mapillary-vistas-id2label.json"
UpperCamelCase__ = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="dataset" ) , "r" ) )
UpperCamelCase__ = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
return config
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> Tuple:
'''simple docstring'''
UpperCamelCase__ = []
# stem
# fmt: off
rename_keys.append(("backbone.patch_embed.proj.weight", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("backbone.patch_embed.proj.bias", "model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias") )
rename_keys.append(("backbone.patch_embed.norm.weight", "model.pixel_level_module.encoder.model.embeddings.norm.weight") )
rename_keys.append(("backbone.patch_embed.norm.bias", "model.pixel_level_module.encoder.model.embeddings.norm.bias") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.relative_position_index', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.attn.proj.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.norm2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc1.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((F'backbone.layers.{i}.blocks.{j}.mlp.fc2.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((F'backbone.layers.{i}.downsample.reduction.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.weight', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((F'backbone.layers.{i}.downsample.norm.bias', F'model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'model.pixel_level_module.encoder.hidden_states_norms.{i}.bias') )
# FPN
rename_keys.append(("sem_seg_head.layer_4.weight", "model.pixel_level_module.decoder.fpn.stem.0.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.weight", "model.pixel_level_module.decoder.fpn.stem.1.weight") )
rename_keys.append(("sem_seg_head.layer_4.norm.bias", "model.pixel_level_module.decoder.fpn.stem.1.bias") )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((F'sem_seg_head.adapter_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight') )
rename_keys.append((F'sem_seg_head.adapter_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.weight', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight') )
rename_keys.append((F'sem_seg_head.layer_{source_index}.norm.bias', F'model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias') )
rename_keys.append(("sem_seg_head.mask_features.weight", "model.pixel_level_module.decoder.mask_projection.weight") )
rename_keys.append(("sem_seg_head.mask_features.bias", "model.pixel_level_module.decoder.mask_projection.bias") )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias') )
# cross-attention out projection
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias') )
# MLP 1
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight', F'model.transformer_module.decoder.layers.{idx}.fc1.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias', F'model.transformer_module.decoder.layers.{idx}.fc1.bias') )
# MLP 2
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight', F'model.transformer_module.decoder.layers.{idx}.fc2.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias', F'model.transformer_module.decoder.layers.{idx}.fc2.bias') )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias', F'model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias') )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias', F'model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias') )
# layernorm 3 (final layernorm)
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight') )
rename_keys.append((F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias', F'model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias') )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.weight", "model.transformer_module.decoder.layernorm.weight") )
rename_keys.append(("sem_seg_head.predictor.transformer.decoder.norm.bias", "model.transformer_module.decoder.layernorm.bias") )
# heads on top
rename_keys.append(("sem_seg_head.predictor.query_embed.weight", "model.transformer_module.queries_embedder.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.weight", "model.transformer_module.input_projection.weight") )
rename_keys.append(("sem_seg_head.predictor.input_proj.bias", "model.transformer_module.input_projection.bias") )
rename_keys.append(("sem_seg_head.predictor.class_embed.weight", "class_predictor.weight") )
rename_keys.append(("sem_seg_head.predictor.class_embed.bias", "class_predictor.bias") )
for i in range(3 ):
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.weight', F'mask_embedder.{i}.0.weight') )
rename_keys.append((F'sem_seg_head.predictor.mask_embed.layers.{i}.bias', F'mask_embedder.{i}.0.bias') )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = dct.pop(_UpperCamelCase )
UpperCamelCase__ = val
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) -> int:
'''simple docstring'''
UpperCamelCase__ = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
UpperCamelCase__ = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.weight' )
UpperCamelCase__ = state_dict.pop(F'backbone.layers.{i}.blocks.{j}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[:dim, :]
UpperCamelCase__ = in_proj_bias[: dim]
UpperCamelCase__ = in_proj_weight[
dim : dim * 2, :
]
UpperCamelCase__ = in_proj_bias[
dim : dim * 2
]
UpperCamelCase__ = in_proj_weight[
-dim :, :
]
UpperCamelCase__ = in_proj_bias[-dim :]
# fmt: on
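# Note (added for clarity): the fused `qkv` projection popped above has shape
# (3 * dim, dim), so the three consecutive row slices of size `dim` recover the
# query, key and value weights (and likewise the biases) in that order.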
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] ) -> str:
'''simple docstring'''
UpperCamelCase__ = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight' )
UpperCamelCase__ = state_dict.pop(F'sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias' )
# next, add query, keys and values (in that order) to the state dict
UpperCamelCase__ = in_proj_weight[: hidden_size, :]
UpperCamelCase__ = in_proj_bias[:config.hidden_size]
UpperCamelCase__ = in_proj_weight[hidden_size : hidden_size * 2, :]
UpperCamelCase__ = in_proj_bias[hidden_size : hidden_size * 2]
UpperCamelCase__ = in_proj_weight[-hidden_size :, :]
UpperCamelCase__ = in_proj_bias[-hidden_size :]
# fmt: on
def SCREAMING_SNAKE_CASE__( ) -> torch.Tensor:
'''simple docstring'''
UpperCamelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCamelCase__ = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : bool = False ) -> Dict:
'''simple docstring'''
UpperCamelCase__ = get_maskformer_config(_UpperCamelCase )
# load original state_dict
with open(_UpperCamelCase , "rb" ) as f:
UpperCamelCase__ = pickle.load(_UpperCamelCase )
UpperCamelCase__ = data["model"]
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
UpperCamelCase__ = create_rename_keys(_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_swin_q_k_v(_UpperCamelCase , config.backbone_config )
read_in_decoder_q_k_v(_UpperCamelCase , _UpperCamelCase )
# update to torch tensors
for key, value in state_dict.items():
UpperCamelCase__ = torch.from_numpy(_UpperCamelCase )
# load 🤗 model
UpperCamelCase__ = MaskFormerForInstanceSegmentation(_UpperCamelCase )
model.eval()
for name, param in model.named_parameters():
print(_UpperCamelCase , param.shape )
UpperCamelCase__ , UpperCamelCase__ = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(_UpperCamelCase ) == 0, F'Unexpected keys: {unexpected_keys}'
# verify results
UpperCamelCase__ = prepare_img()
if "vistas" in model_name:
UpperCamelCase__ = 65
elif "cityscapes" in model_name:
UpperCamelCase__ = 6_55_35
else:
UpperCamelCase__ = 2_55
UpperCamelCase__ = True if "ade" in model_name else False
UpperCamelCase__ = MaskFormerImageProcessor(ignore_index=_UpperCamelCase , reduce_labels=_UpperCamelCase )
UpperCamelCase__ = image_processor(_UpperCamelCase , return_tensors="pt" )
UpperCamelCase__ = model(**_UpperCamelCase )
print("Logits:" , outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
UpperCamelCase__ = torch.tensor(
[[3.6_3_5_3, -4.4_7_7_0, -2.6_0_6_5], [0.5_0_8_1, -4.2_3_9_4, -3.5_3_4_3], [2.1_9_0_9, -5.0_3_5_3, -1.9_3_2_3]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCamelCase , atol=1e-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and image processor to {pytorch_dump_folder_path}' )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
model.save_pretrained(_UpperCamelCase )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
print("Pushing model and image processor to the hub..." )
model.push_to_hub(F'nielsr/{model_name}' )
image_processor.push_to_hub(F'nielsr/{model_name}' )
if __name__ == "__main__":
__lowercase: Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="maskformer-swin-tiny-ade",
type=str,
help=("Name of the MaskFormer model you'd like to convert",),
)
parser.add_argument(
"--checkpoint_path",
default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
type=str,
help="Path to the original state dict (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__lowercase: Union[str, Any] = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 31
|
'''simple docstring'''
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
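# Worked example (added for clarity), using approximate values for water: with
# density ~998 kg/m^3 and bulk modulus ~2.1e9 Pa, the function above returns
# (2.1e9 / 998) ** 0.5, roughly 1.45e3 m/s.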
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
from math import pi, sqrt, tan
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("surface_area_cube() only accepts non-negative values" )
return 6 * side_length**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if length < 0 or breadth < 0 or height < 0:
raise ValueError("surface_area_cuboid() only accepts non-negative values" )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_sphere() only accepts non-negative values" )
return 4 * pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("surface_area_hemisphere() only accepts non-negative values" )
return 3 * pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cone() only accepts non-negative values" )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
"surface_area_conical_frustum() only accepts non-negative values" )
UpperCamelCase__ = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0 or height < 0:
raise ValueError("surface_area_cylinder() only accepts non-negative values" )
return 2 * pi * radius * (height + radius)
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if torus_radius < 0 or tube_radius < 0:
raise ValueError("surface_area_torus() only accepts non-negative values" )
if torus_radius < tube_radius:
raise ValueError(
"surface_area_torus() does not support spindle or self intersecting tori" )
return 4 * pow(_UpperCamelCase , 2 ) * torus_radius * tube_radius
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if length < 0 or width < 0:
raise ValueError("area_rectangle() only accepts non-negative values" )
return length * width
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if side_length < 0:
raise ValueError("area_square() only accepts non-negative values" )
return side_length**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_triangle() only accepts non-negative values" )
return (base * height) / 2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError("area_triangle_three_sides() only accepts non-negative values" )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError("Given three sides do not form a triangle" )
UpperCamelCase__ = (sidea + sidea + sidea) / 2
UpperCamelCase__ = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if base < 0 or height < 0:
raise ValueError("area_parallelogram() only accepts non-negative values" )
return base * height
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if basea < 0 or basea < 0 or height < 0:
raise ValueError("area_trapezium() only accepts non-negative values" )
return 1 / 2 * (basea + basea) * height
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius < 0:
raise ValueError("area_circle() only accepts non-negative values" )
return pi * radius**2
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if radius_x < 0 or radius_y < 0:
raise ValueError("area_ellipse() only accepts non-negative values" )
return pi * radius_x * radius_y
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : float , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError("area_rhombus() only accepts non-negative values" )
return 1 / 2 * diagonal_a * diagonal_a
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int , _UpperCamelCase : float ) -> float:
'''simple docstring'''
if not isinstance(_UpperCamelCase , _UpperCamelCase ) or sides < 3:
raise ValueError(
"area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides" )
elif length < 0:
raise ValueError(
"area_reg_polygon() only accepts non-negative values as \
length of a side" )
return (sides * length**2) / (4 * tan(pi / sides ))
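# Note (added for clarity): (sides * length**2) / (4 * tan(pi / sides)) is the
# standard area formula for a regular n-gon; for sides=4, length=10 it gives
# 400 / (4 * tan(pi / 4)) = 100.0, matching a 10 x 10 square.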
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print("[DEMO] Areas of various geometric shapes: \n")
print(F"""Rectangle: {area_rectangle(10, 20) = }""")
print(F"""Square: {area_square(10) = }""")
print(F"""Triangle: {area_triangle(10, 10) = }""")
print(F"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(F"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(F"""Rhombus: {area_rhombus(10, 20) = }""")
print(F"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(F"""Circle: {area_circle(20) = }""")
print(F"""Ellipse: {area_ellipse(10, 20) = }""")
print("\nSurface Areas of various geometric shapes: \n")
print(F"""Cube: {surface_area_cube(20) = }""")
print(F"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(F"""Sphere: {surface_area_sphere(20) = }""")
print(F"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(F"""Cone: {surface_area_cone(10, 20) = }""")
print(F"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(F"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(F"""Torus: {surface_area_torus(20, 10) = }""")
print(F"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(F"""Square: {area_reg_polygon(4, 10) = }""")
print(F"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 31
|
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int | float | str , _UpperCamelCase : int | float | str ) -> list[str]:
'''simple docstring'''
if nth_term == "":
return [""]
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = int(_UpperCamelCase )
UpperCamelCase__ = []
for temp in range(int(_UpperCamelCase ) ):
series.append(F'1 / {pow(temp + 1 , int(_UpperCamelCase ) )}' if series else "1" )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowercase: Dict = int(input("Enter the last number (nth term) of the P-Series"))
__lowercase: Optional[int] = int(input("Enter the power for P-Series"))
print("Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p")
print(p_series(nth_term, power))
| 31
| 1
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : int = ''
_lowerCamelCase : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_lowerCamelCase : str = None # compression type in fsspec. ex: "gzip"
    _lowerCamelCase : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str, a_ : str = "", a_ : Optional[str] = None, a_ : Optional[dict] = None, **a_ : Union[str, Any] ):
"""simple docstring"""
super().__init__(self, **a_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
UpperCamelCase__ = fsspec.open(
a_, mode="rb", protocol=a_, compression=self.compression, client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs", {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
UpperCamelCase__ = os.path.basename(self.file.path.split("::" )[0] )
UpperCamelCase__ = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
UpperCamelCase__ = None
@classmethod
def lowercase_ ( cls : Optional[Any], a_ : List[Any] ):
"""simple docstring"""
return super()._strip_protocol(a_ ).lstrip("/" )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
if self.dir_cache is None:
UpperCamelCase__ = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
UpperCamelCase__ = {f["name"]: f}
def lowercase_ ( self : int, a_ : str ):
"""simple docstring"""
return self.file.open().read()
def lowercase_ ( self : List[Any], a_ : str, a_ : str = "rb", a_ : Union[str, Any]=None, a_ : Any=True, a_ : Dict=None, **a_ : List[str], ):
"""simple docstring"""
UpperCamelCase__ = self._strip_protocol(a_ )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[int] = 'bz2'
_lowerCamelCase : List[Any] = 'bz2'
_lowerCamelCase : Optional[int] = '.bz2'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = 'gzip'
_lowerCamelCase : Any = 'gzip'
_lowerCamelCase : List[Any] = '.gz'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'lz4'
_lowerCamelCase : int = 'lz4'
_lowerCamelCase : Optional[int] = '.lz4'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : Optional[Any] = 'xz'
_lowerCamelCase : Optional[Any] = 'xz'
_lowerCamelCase : Optional[Any] = '.xz'
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : List[str] = 'zstd'
_lowerCamelCase : int = 'zstd'
_lowerCamelCase : Union[str, Any] = '.zst'
def __init__( self : str, a_ : str, a_ : str = "rb", a_ : Optional[str] = None, a_ : Optional[dict] = None, a_ : int = DEFAULT_BLOCK_SIZE, **a_ : Tuple, ):
"""simple docstring"""
super().__init__(
fo=a_, mode=a_, target_protocol=a_, target_options=a_, block_size=a_, **a_, )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ = self.file.__enter__
class UpperCAmelCase :
def __init__( self : List[Any], a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = file_
def __enter__( self : Tuple ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Tuple, *a_ : str, **a_ : Any ):
"""simple docstring"""
self._file.__exit__(*a_, **a_ )
def __iter__( self : Optional[int] ):
"""simple docstring"""
return iter(self._file )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : int, a_ : Tuple ):
"""simple docstring"""
return getattr(self._file, a_ )
def fixed_enter(*a_ : int, **a_ : Optional[Any] ):
return WrappedFile(_enter(*a_, **a_ ) )
UpperCamelCase__ = fixed_enter
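# Illustrative usage sketch (added; reuses the chained-URL example from the comment
# on the `protocol` attribute above, and assumes these filesystem classes have been
# registered with fsspec, as the surrounding library does at import time):
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#         data = f.read()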
| 31
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__)
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
_lowerCamelCase : str = field(default='audio-classification' , metadata={'include_in_asdict_even_if_is_default': True})
_lowerCamelCase : ClassVar[Features] = Features({'audio': Audio()})
_lowerCamelCase : ClassVar[Features] = Features({'labels': ClassLabel})
_lowerCamelCase : str = "audio"
_lowerCamelCase : str = "labels"
def lowercase_ ( self : str, a_ : Union[str, Any] ):
"""simple docstring"""
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column], a_ ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
UpperCamelCase__ = copy.deepcopy(self )
UpperCamelCase__ = self.label_schema.copy()
UpperCamelCase__ = features[self.label_column]
UpperCamelCase__ = label_schema
return task_template
@property
def lowercase_ ( self : Any ):
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
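# Illustrative sketch (added; column names and labels are hypothetical). The first
# method above expects the dataset's label column to be a concrete ClassLabel,
# e.g. features built as:
#
#     from datasets import Audio, ClassLabel, Features
#     features = Features({"audio": Audio(), "labels": ClassLabel(names=["no", "yes"])})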
| 31
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : Tuple, a_ : Tuple, a_ : Any=7, a_ : Dict=3, a_ : Dict=10, a_ : Tuple=18, a_ : Tuple=30, a_ : int=400, a_ : str=True, a_ : Optional[int]=None, a_ : str=True, a_ : Union[str, Any]=[0.5, 0.5, 0.5], a_ : List[Any]=[0.5, 0.5, 0.5], a_ : List[Any]=None, ):
"""simple docstring"""
UpperCamelCase__ = size if size is not None else {"shortest_edge": 18}
UpperCamelCase__ = crop_size if crop_size is not None else {"height": 18, "width": 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = num_frames
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = do_normalize
UpperCamelCase__ = image_mean
UpperCamelCase__ = image_std
UpperCamelCase__ = crop_size
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Optional[Any] = VivitImageProcessor if is_vision_available() else None
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = VivitImageProcessingTester(self )
@property
def lowercase_ ( self : str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a_, "image_mean" ) )
self.assertTrue(hasattr(a_, "image_std" ) )
self.assertTrue(hasattr(a_, "do_normalize" ) )
self.assertTrue(hasattr(a_, "do_resize" ) )
self.assertTrue(hasattr(a_, "do_center_crop" ) )
self.assertTrue(hasattr(a_, "size" ) )
def lowercase_ ( self : Optional[int] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"shortest_edge": 18} )
self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84 )
self.assertEqual(image_processor.size, {"shortest_edge": 42} )
self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84} )
def lowercase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
UpperCamelCase__ = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_ )
for video in video_inputs:
self.assertIsInstance(a_, a_ )
self.assertIsInstance(video[0], Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(video_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def lowercase_ ( self : int ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_, numpify=a_ )
for video in video_inputs:
self.assertIsInstance(a_, a_ )
self.assertIsInstance(video[0], np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(video_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def lowercase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_video_inputs(self.image_processor_tester, equal_resolution=a_, torchify=a_ )
for video in video_inputs:
self.assertIsInstance(a_, a_ )
self.assertIsInstance(video[0], torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(video_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
UpperCamelCase__ = image_processing(a_, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_videos.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
| 31
|
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__lowercase: str = random.Random()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int]=1.0 , _UpperCamelCase : Dict=None , _UpperCamelCase : List[str]=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
UpperCamelCase__ = global_rng
UpperCamelCase__ = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase ( unittest.TestCase):
def __init__( self : List[Any], a_ : List[str], a_ : Any=7, a_ : Dict=400, a_ : str=2000, a_ : List[Any]=24, a_ : int=24, a_ : int=0.0, a_ : Union[str, Any]=1_6000, a_ : Union[str, Any]=True, a_ : Optional[Any]=True, ):
"""simple docstring"""
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = min_seq_length
UpperCamelCase__ = max_seq_length
UpperCamelCase__ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
UpperCamelCase__ = feature_size
UpperCamelCase__ = num_mel_bins
UpperCamelCase__ = padding_value
UpperCamelCase__ = sampling_rate
UpperCamelCase__ = return_attention_mask
UpperCamelCase__ = do_normalize
def lowercase_ ( self : Tuple ):
"""simple docstring"""
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowercase_ ( self : Optional[Any], a_ : Union[str, Any]=False, a_ : Optional[int]=False ):
"""simple docstring"""
def _flatten(a_ : Dict ):
return list(itertools.chain(*a_ ) )
if equal_length:
UpperCamelCase__ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
UpperCamelCase__ = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff )
]
if numpify:
UpperCamelCase__ = [np.asarray(a_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase):
_lowerCamelCase : Dict = SpeechaTextFeatureExtractor if is_speech_available() else None
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = SpeechaTextFeatureExtractionTester(self )
def lowercase_ ( self : Optional[int], a_ : Tuple ):
"""simple docstring"""
self.assertTrue(np.all(np.mean(a_, axis=0 ) < 1e-3 ) )
self.assertTrue(np.all(np.abs(np.var(a_, axis=0 ) - 1 ) < 1e-3 ) )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = [np.asarray(a_ ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase__ = feature_extractor(a_, padding=a_, return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase__ = feature_extractor(speech_inputs[0], return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(np_speech_inputs[0], return_tensors="np" ).input_features
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test batched
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase__ = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase__ = np.asarray(a_ )
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
UpperCamelCase__ = feature_extractor(a_, return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(a_, a_ ):
self.assertTrue(np.allclose(a_, a_, atol=1e-3 ) )
def lowercase_ ( self : List[str] ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, padding=a_, max_length=a_, return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = ["longest", "max_length", "do_not_pad"]
UpperCamelCase__ = [None, 16, None]
for max_length, padding in zip(a_, a_ ):
UpperCamelCase__ = feature_extractor(
a_, max_length=a_, padding=a_, return_tensors="np", return_attention_mask=a_ )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = [np.sum(a_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def lowercase_ ( self : str ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="max_length", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def lowercase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=4, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape, (3, 4, 24) )
UpperCamelCase__ = [floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
UpperCamelCase__ = feature_extractor(
a_, padding="longest", max_length=16, truncation=a_, return_tensors="np", return_attention_mask=a_, )
UpperCamelCase__ = inputs.input_features
UpperCamelCase__ = inputs.attention_mask
UpperCamelCase__ = np.sum(attention_mask == 1, axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
        # make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape, (3, 6, 24) )
def lowercase_ ( self : Optional[Any] ):
"""simple docstring"""
import torch
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = np.random.rand(100, 32 ).astype(np.floataa )
UpperCamelCase__ = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
UpperCamelCase__ = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def lowercase_ ( self : List[str], a_ : int ):
"""simple docstring"""
from datasets import load_dataset
UpperCamelCase__ = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation" )
# automatic decoding with librispeech
UpperCamelCase__ = ds.sort("id" ).select(range(a_ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowercase_ ( self : int ):
"""simple docstring"""
        # fmt: off
        UpperCamelCase__ = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
UpperCamelCase__ = self._load_datasamples(1 )
UpperCamelCase__ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
UpperCamelCase__ = feature_extractor(a_, return_tensors="pt" ).input_features
        self.assertEqual(input_features.shape, (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30], a_, atol=1e-4 ) )
| 31
| 1
|
'''simple docstring'''
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
__lowercase: Any = get_logger(__name__)
__lowercase: Optional[int] = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class UpperCAmelCase :
@add_start_docstrings(a_ )
def __call__( self : Any, a_ : jnp.ndarray, a_ : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class UpperCAmelCase :
@add_start_docstrings(a_ )
def __call__( self : List[str], a_ : jnp.ndarray, a_ : jnp.ndarray ):
"""simple docstring"""
raise NotImplementedError(
f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
@add_start_docstrings(a_ )
def __call__( self : Union[str, Any], a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int, **a_ : Optional[int] ):
"""simple docstring"""
for processor in self:
UpperCamelCase__ = inspect.signature(processor.__call__ ).parameters
if len(a_ ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'Make sure that all the required parameters: {list(function_args.keys() )} for '
f'{processor.__class__} are passed to the logits processor.' )
UpperCamelCase__ = processor(a_, a_, a_, **a_ )
else:
UpperCamelCase__ = processor(a_, a_, a_ )
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Tuple, a_ : float ):
"""simple docstring"""
if not isinstance(a_, a_ ) or not (temperature > 0):
raise ValueError(f'`temperature` has to be a strictly positive float, but is {temperature}' )
UpperCamelCase__ = temperature
def __call__( self : str, a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = scores / self.temperature
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : List[Any], a_ : float, a_ : float = -float("Inf" ), a_ : int = 1 ):
"""simple docstring"""
if not isinstance(a_, a_ ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'`top_p` has to be a float > 0 and < 1, but is {top_p}' )
if not isinstance(a_, a_ ) or (min_tokens_to_keep < 1):
raise ValueError(f'`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}' )
UpperCamelCase__ = top_p
UpperCamelCase__ = filter_value
UpperCamelCase__ = min_tokens_to_keep
def __call__( self : str, a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = lax.top_k(a_, scores.shape[-1] )
UpperCamelCase__ = jnp.full_like(a_, self.filter_value )
UpperCamelCase__ = jax.nn.softmax(a_, axis=-1 ).cumsum(axis=-1 )
UpperCamelCase__ = cumulative_probs < self.top_p
# include the token that is higher than top_p as well
UpperCamelCase__ = jnp.roll(a_, 1 )
score_mask |= score_mask.at[:, 0].set(a_ )
# min tokens to keep
UpperCamelCase__ = score_mask.at[:, : self.min_tokens_to_keep].set(a_ )
UpperCamelCase__ = jnp.where(a_, a_, a_ )
UpperCamelCase__ = jax.lax.sort_key_val(a_, a_ )[-1]
return next_scores
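# Worked example (added for clarity): with sorted probabilities
# [0.5, 0.3, 0.15, 0.05] and top_p = 0.9, the cumulative sums are
# [0.5, 0.8, 0.95, 1.0]; the `< top_p` mask keeps the first two entries, the roll
# re-admits the token that crosses the threshold, and the remaining scores are set
# to `filter_value`.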
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Optional[Any], a_ : int, a_ : float = -float("Inf" ), a_ : int = 1 ):
"""simple docstring"""
if not isinstance(a_, a_ ) or top_k <= 0:
raise ValueError(f'`top_k` has to be a strictly positive integer, but is {top_k}' )
UpperCamelCase__ = max(a_, a_ )
UpperCamelCase__ = filter_value
def __call__( self : List[str], a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ , UpperCamelCase__ = scores.shape
UpperCamelCase__ = jnp.full(batch_size * vocab_size, self.filter_value )
UpperCamelCase__ = min(self.top_k, scores.shape[-1] ) # Safety check
UpperCamelCase__ , UpperCamelCase__ = lax.top_k(a_, a_ )
UpperCamelCase__ = jnp.broadcast_to((jnp.arange(a_ ) * vocab_size)[:, None], (batch_size, topk) ).flatten()
UpperCamelCase__ = topk_scores.flatten()
UpperCamelCase__ = topk_indices.flatten() + shift
UpperCamelCase__ = next_scores_flat.at[topk_indices_flat].set(a_ )
UpperCamelCase__ = next_scores_flat.reshape(a_, a_ )
return next_scores
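# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The shift above maps each batch row's top-k indices into a single flattened
# (batch_size * vocab_size) score array, so one scatter call fills every row at
# once. Sizes below are hypothetical.
import jax.numpy as jnp

batch_size, vocab_size, topk = 2, 5, 2
shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
# shift == [0, 0, 5, 5]: row 1's top-k indices land at flat positions 5..9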
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Union[str, Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = bos_token_id
def __call__( self : Optional[int], a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = jnp.full(scores.shape, -float("inf" ) )
UpperCamelCase__ = 1 - jnp.bool_(cur_len - 1 )
UpperCamelCase__ = jnp.where(a_, new_scores.at[:, self.bos_token_id].set(0 ), a_ )
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : List[Any], a_ : int, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = max_length
UpperCamelCase__ = eos_token_id
def __call__( self : Optional[int], a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = jnp.full(scores.shape, -float("inf" ) )
UpperCamelCase__ = 1 - jnp.bool_(cur_len - self.max_length + 1 )
UpperCamelCase__ = jnp.where(a_, new_scores.at[:, self.eos_token_id].set(0 ), a_ )
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : int, a_ : int, a_ : int ):
"""simple docstring"""
if not isinstance(a_, a_ ) or min_length < 0:
raise ValueError(f'`min_length` has to be a positive integer, but is {min_length}' )
if not isinstance(a_, a_ ) or eos_token_id < 0:
raise ValueError(f'`eos_token_id` has to be a positive integer, but is {eos_token_id}' )
UpperCamelCase__ = min_length
UpperCamelCase__ = eos_token_id
def __call__( self : str, a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = 1 - jnp.clip(cur_len - self.min_length, 0, 1 )
UpperCamelCase__ = jnp.where(a_, scores.at[:, self.eos_token_id].set(-float("inf" ) ), a_ )
return scores
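# --- Illustrative sketch (added for clarity, not part of the original file) ---
# jnp.clip(cur_len - min_length, 0, 1) stays 0 until min_length tokens exist, so
# (1 - clip) flags exactly the steps at which EOS must remain blocked.
# Values below are hypothetical.
import jax.numpy as jnp

steps = jnp.arange(8)
blocked = 1 - jnp.clip(steps - 5, 0, 1)  # [1, 1, 1, 1, 1, 1, 0, 0]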
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : List[Any], a_ : Optional[int], a_ : List[Any] ):
"""simple docstring"""
UpperCamelCase__ = list(a_ )
UpperCamelCase__ = begin_index
def __call__( self : Tuple, a_ : Optional[Any], a_ : Union[str, Any], a_ : int ):
"""simple docstring"""
UpperCamelCase__ = 1 - jnp.bool_(cur_len - self.begin_index )
UpperCamelCase__ = jnp.where(a_, scores.at[:, self.begin_suppress_tokens].set(-float("inf" ) ), a_ )
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : str, a_ : list ):
"""simple docstring"""
UpperCamelCase__ = list(a_ )
def __call__( self : Any, a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = scores.at[..., self.suppress_tokens].set(-float("inf" ) )
return scores
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Union[str, Any], a_ : Tuple ):
"""simple docstring"""
UpperCamelCase__ = dict(a_ )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
        UpperCamelCase__ = jnp.ones((max(force_token_map.keys() ) + 1), dtype=jnp.int32 ) * -1
for index, token in force_token_map.items():
if token is not None:
UpperCamelCase__ = force_token_array.at[index].set(a_ )
        UpperCamelCase__ = jnp.int32(a_ )
def __call__( self : List[str], a_ : jnp.ndarray, a_ : jnp.ndarray, a_ : int ):
"""simple docstring"""
def _force_token(a_ : str ):
UpperCamelCase__ = scores.shape[0]
UpperCamelCase__ = self.force_token_array[generation_idx]
UpperCamelCase__ = jnp.ones_like(a_, dtype=scores.dtype ) * -float("inf" )
UpperCamelCase__ = jnp.zeros((batch_size, 1), dtype=scores.dtype )
UpperCamelCase__ = lax.dynamic_update_slice(a_, a_, (0, current_token) )
return new_scores
UpperCamelCase__ = lax.cond(
cur_len >= self.force_token_array.shape[0], lambda: scores, lambda: lax.cond(
self.force_token_array[cur_len] >= 0, lambda: _force_token(a_ ), lambda: scores, ), )
return scores
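# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The constructor above turns a {generation_index: token_id} map into a dense
# array indexed by step, with -1 meaning "no forced token", so lookups stay
# XLA-friendly. The token ids below are hypothetical.
import jax.numpy as jnp

force_token_map = {1: 50259, 2: 50359}
force_array = jnp.ones(max(force_token_map.keys()) + 1, dtype=jnp.int32) * -1
for index, token in force_token_map.items():
    force_array = force_array.at[index].set(token)
# force_array == [-1, 50259, 50359]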
class UpperCAmelCase ( SCREAMING_SNAKE_CASE__):
def __init__( self : Tuple, a_ : Optional[int], a_ : Tuple, a_ : int ):
"""simple docstring"""
UpperCamelCase__ = generate_config.eos_token_id
UpperCamelCase__ = generate_config.no_timestamps_token_id
UpperCamelCase__ = generate_config.no_timestamps_token_id + 1
UpperCamelCase__ = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(a_, "max_initial_timestamp_index" ):
UpperCamelCase__ = generate_config.max_initial_timestamp_index
else:
UpperCamelCase__ = model_config.vocab_size
if self.max_initial_timestamp_index is None:
UpperCamelCase__ = model_config.vocab_size
def __call__( self : Union[str, Any], a_ : List[str], a_ : str, a_ : str ):
"""simple docstring"""
UpperCamelCase__ = scores.at[:, self.no_timestamps_token_id].set(-float("inf" ) )
def handle_pairs(a_ : Any, a_ : Any ):
UpperCamelCase__ = jnp.where((cur_len - self.begin_index) >= 1, a_, a_ )
UpperCamelCase__ = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin, True and last_was_timestamp, a_, )
UpperCamelCase__ = jnp.where((cur_len - self.begin_index) < 2, a_, a_ )
UpperCamelCase__ = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin, a_, a_, )
return jnp.where(
a_, jnp.where(
penultimate_was_timestamp > 0, scores_k.at[self.timestamp_begin :].set(-float("inf" ) ), scores_k.at[: self.eos_token_id].set(-float("inf" ) ), ), a_, )
UpperCamelCase__ = jax.vmap(a_ )(a_, a_ )
UpperCamelCase__ = jnp.where(cur_len == self.begin_index, a_, a_ )
UpperCamelCase__ = jnp.where(
self.max_initial_timestamp_index is not None, True and apply_max_initial_timestamp, a_, )
UpperCamelCase__ = self.timestamp_begin + self.max_initial_timestamp_index
UpperCamelCase__ = jnp.where(
a_, scores.at[:, last_allowed + 1 :].set(-float("inf" ) ), a_, )
# if sum of probability over timestamps is above any other token, sample timestamp
UpperCamelCase__ = jax.nn.log_softmax(a_, axis=-1 )
def handle_cumulative_probs(a_ : Optional[Any], a_ : Any ):
UpperCamelCase__ = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1 )
UpperCamelCase__ = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob, scores_k.at[: self.timestamp_begin].set(-float("inf" ) ), a_, )
UpperCamelCase__ = jax.vmap(a_ )(a_, a_ )
return scores
| 31
|
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.mean(1 )
# Centralize the data of class i
UpperCamelCase__ = data - column_reshape(_UpperCamelCase )
if i > 0:
            # If covariance_sum has already been initialized (i > 0)
covariance_sum += np.dot(_UpperCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
UpperCamelCase__ = features.mean(1 )
UpperCamelCase__ = np.nan
for i in range(_UpperCamelCase ):
UpperCamelCase__ = features[:, labels == i]
UpperCamelCase__ = data.shape[1]
UpperCamelCase__ = data.mean(1 )
if i > 0:
            # If covariance_sum has already been initialized (i > 0)
covariance_sum += device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCamelCase__ = device_data * np.dot(
column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase ) , (column_reshape(_UpperCamelCase ) - column_reshape(_UpperCamelCase )).T , )
return covariance_sum / features.shape[1]
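# --- Illustrative sketch (added for clarity, not part of the original file) ---
# Both covariance helpers above expect features of shape (dimensions, samples)
# and integer class labels, and return a (dimensions x dimensions) matrix.
# Hypothetical data: 2 dimensions, 4 samples, 2 classes; the lines below show the
# within-class scatter term for class 0.
import numpy as np

example_features = np.array([[1.0, 2.0, 3.0, 4.0], [2.0, 1.0, 4.0, 3.0]])
example_labels = np.array([0, 0, 1, 1])
class0 = example_features[:, example_labels == 0]
class0_centered = class0 - class0.mean(1).reshape(-1, 1)
within_class0 = class0_centered @ class0_centered.T  # 2x2 scatter of class 0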
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
UpperCamelCase__ = features.mean(1 )
# Center the dataset
UpperCamelCase__ = features - np.reshape(_UpperCamelCase , (data_mean.size, 1) )
UpperCamelCase__ = np.dot(_UpperCamelCase , centered_data.T ) / features.shape[1]
UpperCamelCase__ , UpperCamelCase__ = np.linalg.eigh(_UpperCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCamelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCamelCase__ = np.dot(filtered_eigenvectors.T , _UpperCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
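# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The same PCA eigendecomposition as above on a toy (dimensions x samples)
# matrix, written without the obfuscated wrapper. Values are hypothetical.
import numpy as np

toy_features = np.array([[1.0, 2.0, 3.0], [2.0, 4.0, 6.0]])  # 2 dims, 3 samples
toy_centered = toy_features - toy_features.mean(1, keepdims=True)
toy_cov = toy_centered @ toy_centered.T / toy_features.shape[1]
toy_eigenvalues, toy_eigenvectors = np.linalg.eigh(toy_cov)
toy_projected = toy_eigenvectors[:, ::-1][:, :1].T @ toy_features  # keep 1 component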
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : int , _UpperCamelCase : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
UpperCamelCase__ , UpperCamelCase__ = eigh(
covariance_between_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , covariance_within_classes(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , )
UpperCamelCase__ = eigenvectors[:, ::-1][:, :dimensions]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = np.linalg.svd(_UpperCamelCase )
UpperCamelCase__ = svd_matrix[:, 0:dimensions]
UpperCamelCase__ = np.dot(filtered_svd_matrix.T , _UpperCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_UpperCamelCase )
logging.error("Dataset empty" )
raise AssertionError
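# --- Illustrative sketch (added for clarity, not part of the original file) ---
# scipy.linalg.eigh(a, b) solves the generalized problem a @ v = w * b @ v; above,
# a is the between-class and b the within-class covariance, so the leading
# eigenvectors maximize class separation. Toy values below are hypothetical.
import numpy as np
from scipy.linalg import eigh

gen_a = np.array([[2.0, 0.0], [0.0, 1.0]])
gen_b = np.eye(2)
gen_w, gen_v = eigh(gen_a, gen_b)  # with b = I this reduces to the ordinary eigenproblem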
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCamelCase__ = np.array([0, 0, 0, 1, 1] )
UpperCamelCase__ = 2
UpperCamelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = linear_discriminant_analysis(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if isinstance(_UpperCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__( ) -> None:
'''simple docstring'''
UpperCamelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCamelCase__ = 2
UpperCamelCase__ = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(_UpperCamelCase ) as error_info:
UpperCamelCase__ = principal_component_analysis(_UpperCamelCase , _UpperCamelCase )
if not np.allclose(_UpperCamelCase , _UpperCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 31
| 1
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : int ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(_UpperCamelCase , _UpperCamelCase )
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Any ) -> List[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
UpperCamelCase__ = nn.Linear(_UpperCamelCase , _UpperCamelCase , bias=_UpperCamelCase )
UpperCamelCase__ = emb.weight.data
return lin_layer
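# --- Illustrative sketch (added for clarity, not part of the original file) ---
# The helper above implements weight tying: the output projection reuses the
# input embedding matrix. A minimal standalone equivalent (argument order
# simplified for shape consistency; sizes are hypothetical):
import torch
from torch import nn

tied_emb = nn.Embedding(8, 4)            # vocab_size=8, d_model=4
tied_lin = nn.Linear(4, 8, bias=False)   # weight shape (8, 4), same as the embedding
tied_lin.weight.data = tied_emb.weight.data  # share the same matrix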
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = torch.load(_UpperCamelCase , map_location="cpu" )
UpperCamelCase__ = mam_aaa["args"] or mam_aaa["cfg"]["model"]
UpperCamelCase__ = mam_aaa["model"]
remove_ignore_keys_(_UpperCamelCase )
UpperCamelCase__ = state_dict["encoder.embed_tokens.weight"].shape[0]
UpperCamelCase__ = MaMaaaConfig(
vocab_size=_UpperCamelCase , max_position_embeddings=10_24 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
UpperCamelCase__ = state_dict["decoder.embed_tokens.weight"]
UpperCamelCase__ = MaMaaaForConditionalGeneration(_UpperCamelCase )
model.model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase )
UpperCamelCase__ = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowercase: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__lowercase: List[Any] = parser.parse_args()
    __lowercase: Any = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
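# Illustrative usage (added for clarity; the script filename and both paths below
# are hypothetical, matching the two positional arguments defined above):
#   python convert_m2m100_checkpoint.py /path/to/model.pt /path/to/output_dir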
| 31
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowercase: Optional[List[str]] = None
__lowercase: List[Any] = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowercase: Tuple = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class UpperCAmelCase :
_lowerCamelCase : bool = True
_lowerCamelCase : Optional[str] = None
# Automatically constructed
_lowerCamelCase : ClassVar[str] = "PIL.Image.Image"
_lowerCamelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
_lowerCamelCase : str = field(default='Image' , init=SCREAMING_SNAKE_CASE__ , repr=SCREAMING_SNAKE_CASE__)
def __call__( self : Union[str, Any] ):
"""simple docstring"""
return self.pa_type
def lowercase_ ( self : Optional[Any], a_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
"""simple docstring"""
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if isinstance(a_, a_ ):
UpperCamelCase__ = np.array(a_ )
if isinstance(a_, a_ ):
return {"path": value, "bytes": None}
elif isinstance(a_, a_ ):
return {"path": None, "bytes": value}
elif isinstance(a_, np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(a_ )
elif isinstance(a_, PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(a_ )
elif value.get("path" ) is not None and os.path.isfile(value["path"] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("path" )}
elif value.get("bytes" ) is not None or value.get("path" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("bytes" ), "path": value.get("path" )}
else:
raise ValueError(
f'An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
def lowercase_ ( self : Dict, a_ : dict, a_ : Dict=None ):
"""simple docstring"""
if not self.decode:
raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead." )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support decoding images, please install 'Pillow'." )
if token_per_repo_id is None:
UpperCamelCase__ = {}
UpperCamelCase__ , UpperCamelCase__ = value["path"], value["bytes"]
if bytes_ is None:
if path is None:
raise ValueError(f'An image should have one of \'path\' or \'bytes\' but both are None in {value}.' )
else:
if is_local_path(a_ ):
UpperCamelCase__ = PIL.Image.open(a_ )
else:
UpperCamelCase__ = path.split("::" )[-1]
try:
UpperCamelCase__ = string_to_dict(a_, config.HUB_DATASETS_URL )["repo_id"]
UpperCamelCase__ = token_per_repo_id.get(a_ )
except ValueError:
UpperCamelCase__ = None
with xopen(a_, "rb", use_auth_token=a_ ) as f:
UpperCamelCase__ = BytesIO(f.read() )
UpperCamelCase__ = PIL.Image.open(bytes_ )
else:
UpperCamelCase__ = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def lowercase_ ( self : List[str] ):
"""simple docstring"""
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("binary" ),
"path": Value("string" ),
}
)
def lowercase_ ( self : List[Any], a_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
"""simple docstring"""
if pa.types.is_string(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("bytes" ) >= 0:
UpperCamelCase__ = storage.field("bytes" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.binary() )
if storage.type.get_field_index("path" ) >= 0:
UpperCamelCase__ = storage.field("path" )
else:
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
UpperCamelCase__ = pa.array(
[encode_np_array(np.array(a_ ) )["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
UpperCamelCase__ = pa.array([None] * len(a_ ), type=pa.string() )
UpperCamelCase__ = pa.StructArray.from_arrays(
[bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def lowercase_ ( self : str, a_ : pa.StructArray ):
"""simple docstring"""
@no_op_if_value_is_null
def path_to_bytes(a_ : Dict ):
with xopen(a_, "rb" ) as f:
UpperCamelCase__ = f.read()
return bytes_
UpperCamelCase__ = pa.array(
[
(path_to_bytes(x["path"] ) if x["bytes"] is None else x["bytes"]) if x is not None else None
for x in storage.to_pylist()
], type=pa.binary(), )
UpperCamelCase__ = pa.array(
[os.path.basename(a_ ) if path is not None else None for path in storage.field("path" ).to_pylist()], type=pa.string(), )
UpperCamelCase__ = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
return array_cast(a_, self.pa_type )
def SCREAMING_SNAKE_CASE__( ) -> List[str]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
UpperCamelCase__ = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> bytes:
'''simple docstring'''
UpperCamelCase__ = BytesIO()
if image.format in list_image_compression_formats():
UpperCamelCase__ = image.format
else:
UpperCamelCase__ = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
image.save(_UpperCamelCase , format=_UpperCamelCase )
return buffer.getvalue()
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : "PIL.Image.Image" ) -> dict:
'''simple docstring'''
if hasattr(_UpperCamelCase , "filename" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : np.ndarray ) -> dict:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
UpperCamelCase__ = array.dtype
UpperCamelCase__ = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
UpperCamelCase__ = dtype.kind
UpperCamelCase__ = dtype.itemsize
UpperCamelCase__ = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
UpperCamelCase__ = np.dtype("|u1" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.' )
if dtype is not dest_dtype:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
UpperCamelCase__ = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
UpperCamelCase__ = dtype_byteorder + dtype_kind + str(_UpperCamelCase )
UpperCamelCase__ = np.dtype(_UpperCamelCase )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}' )
UpperCamelCase__ = PIL.Image.fromarray(array.astype(_UpperCamelCase ) )
return {"path": None, "bytes": image_to_bytes(_UpperCamelCase )}
def SCREAMING_SNAKE_CASE__( _UpperCamelCase : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
'''simple docstring'''
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("To support encoding images, please install 'Pillow'." )
if objs:
UpperCamelCase__ , UpperCamelCase__ = first_non_null_value(_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(_UpperCamelCase , np.ndarray ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
elif isinstance(_UpperCamelCase , PIL.Image.Image ):
UpperCamelCase__ = no_op_if_value_is_null(_UpperCamelCase )
return [obj_to_image_dict_func(_UpperCamelCase ) for obj in objs]
else:
return objs
else:
return objs
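# Illustrative usage sketch (added for clarity; this file mirrors the `datasets`
# library's Image feature with names obfuscated in this dump, so the public API
# names below are the upstream ones, not names defined here):
# from datasets import Dataset, Image
# ds = Dataset.from_dict({"image": ["path/to/img.png"]}).cast_column("image", Image())
# ds[0]["image"]  # decoded as a PIL.Image.Image when decode=True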
| 31
| 1
|