from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""ElectraTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
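# Editor's sketch (not part of the original module): with the lazy structure above,
# importing this package stays cheap, and e.g.
#   from transformers.models import electra
#   model_cls = electra.ElectraModel
# only triggers the import of `modeling_electra` at the attribute access, after the
# torch-availability check has already decided whether the name exists at all.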
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to another base (2 to 36) and return it as a string."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
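# Worked example (editor's sketch): decimal_to_any(255, 16) == "FF".
# divmod(255, 16) -> (15, 15), so "F" is appended and num becomes 15;
# divmod(15, 16) -> (0, 15) appends another "F", div hits 0, and the
# accumulated digit string is reversed before being returned.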
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if resistance < 0:
raise ValueError('Resistance cannot be negative' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('Exactly one argument must be 0' )
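# Quick checks (editor's sketch): ohms_law(voltage=10, current=5, resistance=0)
# returns {"resistance": 2.0}, and ohms_law(voltage=0, current=2, resistance=3)
# returns {"voltage": 6.0}; exactly one argument must be the unknown, passed as 0.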
if __name__ == "__main__":
import doctest
doctest.testmod()
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # `interpolate_pos_encoding` lets the pre-trained position embeddings be
        # interpolated so the model can run on higher-resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """A small test to make sure inference works in half precision without any problem."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
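# Editor's note (sketch): assuming the usual transformers repository layout, these tests
# can be run in isolation with e.g.
#   pytest tests/models/vit/test_modeling_vit.py -k "ViTModelTest"
# The exact file path is an assumption about where this module lives in the checkout.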
# flake8: noqa
# Lint as: python3
__all__ = [
'VerificationMode',
'Version',
'disable_progress_bar',
'enable_progress_bar',
'is_progress_bar_enabled',
'experimental',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))

        # Recursively sort last 2/3 elements
        stooge(arr, i + t, (h))

        # Recursively sort first 2/3 elements again
        stooge(arr, i, (h - t))
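# Editor's sketch of the recursion on [2, 1, 3]: the first 2/3 pass sorts [2, 1] into
# [1, 2], the last 2/3 pass leaves [2, 3] untouched, and the final first-2/3 pass is a
# no-op. The three-way recursion gives a runtime of O(n^(log 3 / log 1.5)) ~ O(n^2.71),
# worse than plain insertion sort; the algorithm is of purely pedagogical interest.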
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "x" , _SCREAMING_SNAKE_CASE = 10**-10 , _SCREAMING_SNAKE_CASE = 1 , ) -> complex:
lowercase__ = symbols(_SCREAMING_SNAKE_CASE )
lowercase__ = lambdify(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = lambdify(_SCREAMING_SNAKE_CASE , diff(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowercase__ = starting_point
while True:
if diff_function(_SCREAMING_SNAKE_CASE ) != 0:
lowercase__ = prev_guess - multiplicity * func(_SCREAMING_SNAKE_CASE ) / diff_function(
_SCREAMING_SNAKE_CASE )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
lowercase__ = next_guess
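# Editor's note (sketch): each update applies x_{n+1} = x_n - m * f(x_n) / f'(x_n).
# With m = 1 and f(x) = x**2 - 4 from x0 = 3, one step gives 3 - 5/6 ~ 2.1667, and the
# iteration converges quadratically to the root 2; m > 1 restores fast convergence at
# roots of multiplicity m, where plain Newton-Raphson degrades to linear convergence.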
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])

    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()

        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
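# Sanity check (editor's sketch, using the example from the Project Euler 114
# statement): a row of length 7 admits exactly 17 arrangements of red blocks of
# minimum length 3 separated by at least one black square, so solution(7) == 17.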
if __name__ == "__main__":
print(f'''{solution() = }''')
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}


class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
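# Usage sketch (editor's addition; `ASTModel` is assumed to accompany this config in
# the same library):
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   model = ASTModel(config)  # randomly initialised model with this geometry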
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """
    Copy/paste/tweak model's weights to our EfficientNet structure.
    """
    # Load original model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
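# Example invocation (editor's sketch; the script filename is an assumption):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model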
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DDIMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DDIMPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "latents",
        "callback",
        "callback_steps",
    }
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDIMScheduler()
        components = {"unet": unet, "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 32, 32, 3))
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class DDIMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_ema_bedroom(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_pretrained(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
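# Example invocation (editor's sketch; the script filename is an assumption):
#   python check_runner_status.py --target_runners runner-1,runner-2 --token $GITHUB_TOKEN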
lowercase_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def base64_encode(data: bytes) -> bytes:
    """Encodes `data` to Base64 following RFC 4648."""
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )
def base64_decode(encoded_data: str) -> bytes:
    """Decodes Base64-encoded data back to the original bytes."""
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(data)
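# Worked example (editor's sketch): base64_encode(b"Hi") == b"SGk=". "Hi" is 16 bits,
# padded with two zero bits to 18 = 3 * 6 bits, giving indices 18, 6, 36 -> "SGk";
# one "=" is appended because (6 - 16 % 6) // 2 == 1, and base64_decode strips that
# padding and the two filler bits to recover b"Hi".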
if __name__ == "__main__":
import doctest
doctest.testmod()
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
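# Usage sketch (editor's addition; the checkpoint name is an assumption):
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(text=["a dog barking"], audios=waveform,
#                      sampling_rate=48_000, return_tensors="pt")
# Passing both text and audios yields one BatchEncoding carrying `input_ids`,
# `attention_mask`, and `input_features` together.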
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np', )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
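# Hedged note on the knob these tests exercise: `sag_scale` adds Self-Attention
# Guidance on top of classifier-free guidance (`guidance_scale`). A minimal sketch,
# assuming a CUDA device is available:
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4').to('cuda')
#     image = pipe('a photo of an astronaut', sag_scale=0.75, guidance_scale=7.5).images[0]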
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BarthezTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token='<s>', eos_token='</s>', sep_token='</s>', cls_token='<s>', unk_token='<unk>', pad_token='<pad>', mask_token='<mask>', **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, **kwargs, )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.')
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
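# Special-token layout produced above (a sketch for orientation; the token strings
# are the defaults declared in __init__):
#   single sequence:   <s> A </s>
#   pair of sequences: <s> A </s></s> B </s>
# create_token_type_ids_from_sequences returns all zeros for both layouts.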
| 45
| 0
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={'help': 'The output directory where the model will be written.'}, )
    encoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The encoder model checkpoint for weights initialization. '
                "Don't set if you want to train an encoder model from scratch."
            )
        }, )
    decoder_model_name_or_path: str = field(
        metadata={
            'help': (
                'The decoder model checkpoint for weights initialization. '
                "Don't set if you want to train a decoder model from scratch."
            )
        }, )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=encoder_config, decoder_config=decoder_config, )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
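# Hedged invocation sketch (the script filename and checkpoint names are assumptions,
# not taken from this file):
#
#   python create_model_from_encoder_decoder_models.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2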
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': '.',
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 1.0,
            'sag_scale': 1.0,
            'output_type': 'numpy',
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np')
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base')
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = '.'
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type='np', )
        image = output.images

        assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$', line) is not None
def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split('.')
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""")

    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py"""), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ''
    line_index = 0
    for name in parts[i + 1:]:
        while (
            line_index < len(lines) and re.search(rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index]) is None
        ):
            line_index += 1
        indent += '    '
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return ''.join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code):
    lines = code.split('\n')
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r'^(\s*)\S', lines[idx]).groups()[0]
    return ''
def blackify(code):
    """Apply black formatting (and docstring styling) to some code."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len('class Bla:\n'):] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"""^{indent}# End copy""", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = ''.join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code_lines = [line for line in theoretical_code.split('\n') if _re_copy_warning.search(line) is None]
        theoretical_code = '\n'.join(theoretical_code_lines)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace('with', '').split(',')
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]):]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"""Detected changes, rewriting {filename}.""")
        with open(filename, 'w', encoding='utf-8', newline='\n') as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite=False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, '**/*.py'), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = '\n'.join(diffs)
        raise Exception(
            'Found the following copy inconsistencies:\n'
            + diff
            + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_copies(args.fix_and_overwrite)
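# Hedged illustration of the marker this script validates (the dotted path below is
# made up for the example):
#
#     # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.set_timesteps with DDPM->DDIM
#
# `find_code_in_diffusers` resolves the path after `diffusers.`, and the optional
# `with old->new` suffix is applied via `_re_replace_pattern` before comparing.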
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/deit-base-distilled-patch16-224': (
        'https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json'
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = 'deit'

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act='gelu', hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, encoder_stride=16, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ])

    @property
    def atol_for_validation(self):
        return 1e-4
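# Minimal sketch (hedged) of instantiating the config above with its defaults:
#
#     config = DeiTConfig()
#     num_patches = (config.image_size // config.patch_size) ** 2  # (224 // 16) ** 2 == 196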
| 45
| 0
|
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite at feature extraction specific tests
    feat_extract_tester = None
    feature_extraction_class = None

    @property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, 'feature_size'))
        self.assertTrue(hasattr(feat_extract, 'sampling_rate'))
        self.assertTrue(hasattr(feat_extract, 'padding_value'))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})
        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='np')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='pt')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    @require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type='tf')
        batch_features_input = processed_features[input_name]
        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size))
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding='longest')
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding='max_length', max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding='longest', return_tensors='np')
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='max_length')[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding='max_length', max_length=pad_max_length, return_tensors='np')
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_4))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_are_equal(input_4, input_5))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_3[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding='longest', pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding='max_length', pad_to_multiple_of=10, max_length=pad_max_length)
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features, padding='max_length', pad_to_multiple_of=10, max_length=pad_max_length, return_tensors='np', )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_5[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_5[1])[pad_min_length + pad_diff:].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(
                np.asarray(input_5[2])[pad_min_length + 2 * pad_diff:].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff))
            < 1e-3)
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3)
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3)
    def _check_truncation(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False
            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        # truncate to smallest
        input_1 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), truncation=True)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding='max_length', max_length=len(speech_inputs[0]))
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to smallest with np
        input_1 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), return_tensors='np', truncation=True, )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), return_tensors='np')
        input_2 = input_2[input_name]

        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(input_1.shape[1] == len(speech_inputs[0]))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_2))

        # truncate to middle
        input_1 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), truncation=True, return_tensors='np', )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), truncation=True)
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[1]), return_tensors='np')
        input_3 = input_3[input_name]

        self.assertTrue(input_1.shape[1] == len(speech_inputs[1]))
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_2))
        self.assertTrue(_inputs_are_equal(input_1, input_2))
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(input_3))
        self.assertTrue(len(input_3[-1]) == len(speech_inputs[-1]))

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='longest', truncation=True)[input_name]

        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='longest', truncation=True)[input_name]

        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding='max_length', truncation=True)[input_name]

        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        pad_to_multiple_of = 12
        input_1 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, truncation=True, )
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(
            processed_features, padding='max_length', max_length=len(speech_inputs[0]), pad_to_multiple_of=pad_to_multiple_of, )
        input_2 = input_2[input_name]

        # retrieve expected_length as multiple of pad_to_multiple_of
        expected_length = len(speech_inputs[0])
        if expected_length % pad_to_multiple_of != 0:
            expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

        self.assertTrue(len(input_1[0]) == expected_length)
        self.assertTrue(_inputs_have_equal_length(input_1))
        self.assertFalse(_inputs_have_equal_length(input_2))
    def test_padding_from_list(self):
        self._check_padding(numpify=False)

    def test_padding_from_array(self):
        self._check_padding(numpify=True)

    def test_truncation_from_list(self):
        self._check_truncation(numpify=False)

    def test_truncation_from_array(self):
        self._check_truncation(numpify=True)
    @require_torch
    def test_padding_accepts_tensors_pt(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_pt = feat_extract.pad(processed_features, padding='longest', return_tensors='pt')[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

    @require_tf
    def test_padding_accepts_tensors_tf(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_name = feat_extract.model_input_names[0]
        processed_features = BatchFeature({input_name: speech_inputs})

        input_np = feat_extract.pad(processed_features, padding='longest', return_tensors='np')[input_name]
        input_tf = feat_extract.pad(processed_features, padding='longest', return_tensors='tf')[input_name]

        self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)
    def test_attention_mask(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})

        processed = feat_extract.pad(processed, padding='longest', return_tensors='np')
        self.assertIn('attention_mask', processed)
        self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
        self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

    def test_attention_mask_with_truncation(self):
        feat_dict = self.feat_extract_dict
        feat_dict['return_attention_mask'] = True
        feat_extract = self.feature_extraction_class(**feat_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        input_lengths = [len(x) for x in speech_inputs]
        input_name = feat_extract.model_input_names[0]
        processed = BatchFeature({input_name: speech_inputs})
        max_length = min(input_lengths)

        processed_pad = feat_extract.pad(
            processed, padding='max_length', max_length=max_length, truncation=True, return_tensors='np')
        self.assertIn('attention_mask', processed_pad)
        self.assertListEqual(
            list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length])
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs])
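# Hedged sketch of how a concrete test suite plugs into this mixin; the extractor and
# tester names below are illustrative, not defined in this file:
#
#     class Wav2Vec2FeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
#         feature_extraction_class = Wav2Vec2FeatureExtractor
#
#         def setUp(self):
#             self.feat_extract_tester = Wav2Vec2FeatureExtractionTester(self)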
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f"""Bearer {token}"""}

    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'

    url = f"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""

    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the last completed workflow run id of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the artifacts of the last completed workflow run of the scheduled (daily) CI."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Get the contents of the artifacts of the last completed daily CI workflow run."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"""{artifact_name}.zip""")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
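# Hedged usage sketch (the artifact name and env var are illustrative; the token is a
# GitHub personal access token):
#
#     reports = get_last_daily_ci_reports(
#         artifact_names=['ci_results'], output_dir='/tmp/ci', token=os.environ['GH_TOKEN'])
#     # -> {artifact_name: {filename_inside_zip: decoded_text}}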
| 45
| 0
|
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    return {key.lstrip('-'): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}


def main():
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool', usage='datasets-cli <command> [<args>]', allow_abbrev=False)
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers')
    set_verbosity_info()

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)

    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, 'func'):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)

    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
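# Example invocations (hedged; the exact flags come from each command's
# register_subcommand implementation):
#
#     datasets-cli env
#     datasets-cli test ./my_dataset --save_infos --all_configs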
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt='first prompt', image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy', ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy', ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
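# Hedged note: a single VersatileDiffusionPipeline checkpoint serves three call paths,
# exercised above in one test (illustrative signatures):
#
#     pipe.text_to_image(prompt=...)
#     pipe.image_variation(image=...)
#     pipe.dual_guided(prompt=..., image=..., text_to_image_strength=0.75)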
| 45
| 0
|
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text('CPU', font_size=24)
        cpu = Group(cpu_text, cpu_rects).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text('GPU', font_size=24)
        gpu = Group(gpu_text, gpu_rect).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text('Model', font_size=24)
        model = Group(model_text, model_rect).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1), )

        step_1 = MarkupText(
            f"""First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.""", font_size=24, )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"""<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model""", font_size=18, )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
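# To render this scene (a hedged sketch: the module filename `stage_1.py` is an
# assumption, and the `manim` CLI must be installed):
#
#     manim -ql stage_1.py Stage1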
| 706
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
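# Expected behaviour (hedged reconstruction of the stripped doctests):
#     is_arithmetic_series([2, 4, 6])      -> True
#     is_arithmetic_series([3, 6, 12, 24]) -> False
#     arithmetic_mean([2, 4, 6])           -> 4.0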
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
from __future__ import annotations
from collections import deque
class Automaton:
    def __init__(self, keywords):
        self.adlist = []
        self.adlist.append(
            {'value': '', 'next_states': [], 'fail_state': 0, 'output': []})

        for keyword in keywords:
            self.add_keyword(keyword)
        self.set_fail_transitions()

    def find_next_state(self, current_state, char):
        for state in self.adlist[current_state]['next_states']:
            if char == self.adlist[state]['value']:
                return state
        return None

    def add_keyword(self, keyword):
        current_state = 0
        for character in keyword:
            next_state = self.find_next_state(current_state, character)
            if next_state is None:
                self.adlist.append(
                    {
                        'value': character,
                        'next_states': [],
                        'fail_state': 0,
                        'output': [],
                    })
                self.adlist[current_state]['next_states'].append(len(self.adlist) - 1)
                current_state = len(self.adlist) - 1
            else:
                current_state = next_state
        self.adlist[current_state]['output'].append(keyword)

    def set_fail_transitions(self):
        q = deque()
        for node in self.adlist[0]['next_states']:
            q.append(node)
            self.adlist[node]['fail_state'] = 0
        while q:
            r = q.popleft()
            for child in self.adlist[r]['next_states']:
                q.append(child)
                state = self.adlist[r]['fail_state']
                while (
                    self.find_next_state(state, self.adlist[child]['value']) is None
                    and state != 0
                ):
                    state = self.adlist[state]['fail_state']
                self.adlist[child]['fail_state'] = self.find_next_state(
                    state, self.adlist[child]['value'])
                if self.adlist[child]['fail_state'] is None:
                    self.adlist[child]['fail_state'] = 0
                self.adlist[child]['output'] = (
                    self.adlist[child]['output']
                    + self.adlist[self.adlist[child]['fail_state']]['output']
                )

    def search_in(self, string):
        result = {}  # returns a dict with keywords and list of its occurrences
        current_state = 0
        for i in range(len(string)):
            while (
                self.find_next_state(current_state, string[i]) is None
                and current_state != 0
            ):
                current_state = self.adlist[current_state]['fail_state']
            next_state = self.find_next_state(current_state, string[i])
            if next_state is None:
                current_state = 0
            else:
                current_state = next_state
            for key in self.adlist[current_state]['output']:
                if key not in result:
                    result[key] = []
                result[key].append(i - len(key) + 1)
        return result
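# Expected behaviour (hedged reconstruction of the stripped doctest):
#     A = Automaton(['what', 'hat', 'ver', 'er'])
#     A.search_in('whatever, err ... , wherever')
#     -> {'what': [0], 'hat': [1], 'ver': [5, 25], 'er': [6, 10, 22, 26]}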
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("""f(x) = sin(10 * x)""")
    print("""The length of the curve from x = -10 to x = 10 is:""")
    i = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
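# Numerical note (hedged): the polyline sum above converges to the true arc length,
# the integral of sqrt(1 + f'(x)^2) dx, as `steps` grows. For the highly oscillatory
# sin(10 * x) the smallest step counts in the loop visibly under-estimate it, which
# is exactly what the printed sequence demonstrates.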
| 45
| 0
|
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        # The common tokenizer tests assume every ID is decodable on its own; that does
        # not hold for ByT5 since a single byte may not be valid text, so keep only
        # byte IDs that round-trip through decode/encode as plain ASCII.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])

    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = "Unicode €."
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "Unicode €.</s>")

        encoded = tokenizer("e è é ê ë")
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded["input_ids"], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, "e è é ê ë</s>")

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "e è é ê ë</s>")

    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)

    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertNotIn("decoder_input_ids", batch)
        self.assertNotIn("decoder_attention_mask", batch)

    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets["input_ids"].shape[1])

    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ["A long paragraph for summarization. </s>"]
        tgt_text = ["Summary of the text. </s>"]
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)

        self.assertEqual(expected_src_tokens, batch["input_ids"][0])
        self.assertEqual(expected_tgt_tokens, batch["labels"][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                tokenizer.add_tokens(["bim", "bambam"])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append("new_additional_special_token")
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)]

                special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]
                tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [
                    "an_additional_special_token"
                ]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir,
                )
                self.assertIn(
                    "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens
                )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ["an_additional_special_token"],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"])
                    ),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir,
                    additional_special_tokens=new_added_tokens,
                )

                self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens)
                self.assertEqual(
                    ["a_new_additional_special_token"],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"])
                    ),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                tokenizer = tokenizer_class.from_pretrained(tmp_dir)

                self.assertTrue(tokenizer.decode([255]) == "")

    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests use invalid tokens for ByT5, which can only accept
        # one-character strings and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "x", "t", "</s>"]
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)

    # We need a different implementation of the test of the same name defined in TokenizerTesterMixin
    # because this tokenizer doesn't have a vocab
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False
                )

                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)

                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)

                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])

                setattr(tokenizer, "additional_special_tokens_ids", [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [token_id_to_test_setters])
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"
    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key is kept for backward compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )
    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
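
# Illustrative usage (added sketch, not part of the original file): `rope_scaling`
# expects a dict with "type" in {"linear", "dynamic"} and a float "factor" > 1, e.g.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
#
# Anything else makes `_rope_scaling_validation` raise a ValueError.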
| 709
|
def solution(length: int = 50) -> int:
    """
    Counts, for each tile length 2, 3 and 4 separately, the ways to place at
    least one tile in a row of `length` units, and returns their sum
    (Project Euler problem 116).
    """
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
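
# Illustrative behaviour of `make_batched` (added note): a bare image becomes
# [[image]], a flat list of frames becomes [frames], and a list of videos is
# returned unchanged -- so downstream code can always iterate videos -> frames.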
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()

                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad

                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)

            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)

            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y
    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
| 45
| 0
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError("Not supported")

    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d

    return tuple(reversed(idx))
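
# Worked example (added note): with dims=(2, 3), the flat index 5 maps to the
# multi-dimensional index (1, 2), i.e. _flat_idx_to_idx(5, (2, 3)) == (1, 2).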
@torch.jit.ignore
@torch.jit.ignore
def _get_minimal_slice_set(
    start: Sequence[int],
    end: Sequence[int],
    dims: Sequence[int],
    start_edges: Optional[Sequence[bool]] = None,
    end_edges: Optional[Sequence[bool]] = None,
) -> List[Tuple[slice, ...]]:
    # start_edges and end_edges indicate whether, starting from any given
    # dimension, the start/end index is at the top/bottom edge of the
    # corresponding tensor, modeled as a tree
    def reduce_edge_list(l: List[bool]) -> None:
        tally = True
        for i in range(len(l)):
            reversed_idx = -1 * (i + 1)
            l[reversed_idx] &= tally
            tally = l[reversed_idx]

    if start_edges is None:
        start_edges = [s == 0 for s in start]
        reduce_edge_list(start_edges)
    if end_edges is None:
        end_edges = [e == (d - 1) for e, d in zip(end, dims)]
        reduce_edge_list(end_edges)

    # Base cases. Either start/end are empty and we're done, or the final,
    # one-dimensional tensor can be simply sliced
    if len(start) == 0:
        return [()]
    elif len(start) == 1:
        return [(slice(start[0], end[0] + 1),)]

    slices: List[Tuple[slice, ...]] = []
    path_list: List[slice] = []

    # Dimensions common to start and end can be selected directly
    for s, e in zip(start, end):
        if s == e:
            path_list.append(slice(s, s + 1))
        else:
            break

    path: Tuple[slice, ...] = tuple(path_list)
    divergence_idx = len(path_list)

    # start == end, and we're done
    if divergence_idx == len(dims):
        return [path]

    def upper() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        sdi = start[divergence_idx]
        return tuple(
            path + (slice(sdi, sdi + 1),) + s
            for s in _get_minimal_slice_set(
                start[divergence_idx + 1 :],
                [d - 1 for d in dims[divergence_idx + 1 :]],
                dims[divergence_idx + 1 :],
                start_edges=start_edges[divergence_idx + 1 :],
                end_edges=[True for _ in end_edges[divergence_idx + 1 :]],
            )
        )

    def lower() -> Tuple[Tuple[slice, ...], ...]:
        assert start_edges is not None
        assert end_edges is not None

        edi = end[divergence_idx]
        return tuple(
            path + (slice(edi, edi + 1),) + s
            for s in _get_minimal_slice_set(
                [0 for _ in start[divergence_idx + 1 :]],
                end[divergence_idx + 1 :],
                dims[divergence_idx + 1 :],
                start_edges=[True for _ in start_edges[divergence_idx + 1 :]],
                end_edges=end_edges[divergence_idx + 1 :],
            )
        )

    # If both start and end are at the edges of the subtree rooted at
    # divergence_idx, we can just select the whole subtree at once
    if start_edges[divergence_idx] and end_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx] + 1),))
    # If just start is at the edge, we can grab almost all of the subtree,
    # treating only the ragged bottom edge as an edge case
    elif start_edges[divergence_idx]:
        slices.append(path + (slice(start[divergence_idx], end[divergence_idx]),))
        slices.extend(lower())
    # Analogous to the previous case, but the top is ragged this time
    elif end_edges[divergence_idx]:
        slices.extend(upper())
        slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx] + 1),))
    # If both sides of the range are ragged, we need to handle both sides
    # separately. If there's contiguous meat in between them, we can index it
    # in one big chunk
    else:
        slices.extend(upper())
        middle_ground = end[divergence_idx] - start[divergence_idx]
        if middle_ground > 1:
            slices.append(path + (slice(start[divergence_idx] + 1, end[divergence_idx]),))
        slices.extend(lower())

    return slices
@torch.jit.ignore
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))

    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx,
        end_idx,
        batch_dims,
    )

    sliced_tensors = [t[s] for s in slices]

    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def chunk_layer(
    layer: Callable,
    inputs: Dict[str, Any],
    chunk_size: int,
    no_batch_dims: int,
    low_mem: bool = False,
    _out: Any = None,
    _add_into_out: bool = False,
) -> Any:
    if not (len(inputs) > 0):
        raise ValueError("Must provide at least one input")

    initial_dims = [shape[:no_batch_dims] for shape in _fetch_dims(inputs)]
    orig_batch_dims = tuple([max(s) for s in zip(*initial_dims)])

    def _prep_inputs(t: torch.Tensor) -> torch.Tensor:
        if not low_mem:
            if not sum(t.shape[:no_batch_dims]) == no_batch_dims:
                t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
            t = t.reshape(-1, *t.shape[no_batch_dims:])
        else:
            t = t.expand(orig_batch_dims + t.shape[no_batch_dims:])
        return t

    prepped_inputs = tensor_tree_map(_prep_inputs, inputs)
    prepped_outputs = None
    if _out is not None:
        prepped_outputs = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:])), _out)

    flat_batch_dim = 1
    for d in orig_batch_dims:
        flat_batch_dim *= d

    no_chunks = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)

    def _select_chunk(t: torch.Tensor) -> torch.Tensor:
        return t[i : i + chunk_size] if t.shape[0] != 1 else t

    i = 0
    out = prepped_outputs
    for _ in range(no_chunks):
        # Chunk the input
        if not low_mem:
            select_chunk = _select_chunk
        else:
            select_chunk = partial(
                _chunk_slice,
                flat_start=i,
                flat_end=min(flat_batch_dim, i + chunk_size),
                no_batch_dims=len(orig_batch_dims),
            )

        chunks = tensor_tree_map(select_chunk, prepped_inputs)

        # Run the layer on the chunk
        output_chunk = layer(**chunks)

        # Allocate space for the output
        if out is None:
            out = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:]), output_chunk)

        # Put the chunk in its pre-allocated space
        if isinstance(output_chunk, dict):

            def assign(d1: dict, d2: dict) -> None:
                for k, v in d1.items():
                    if isinstance(v, dict):
                        assign(v, d2[k])
                    else:
                        if _add_into_out:
                            v[i : i + chunk_size] += d2[k]
                        else:
                            v[i : i + chunk_size] = d2[k]

            assign(out, output_chunk)
        elif isinstance(output_chunk, tuple):
            for x1, x2 in zip(out, output_chunk):
                if _add_into_out:
                    x1[i : i + chunk_size] += x2
                else:
                    x1[i : i + chunk_size] = x2
        elif isinstance(output_chunk, torch.Tensor):
            if _add_into_out:
                out[i : i + chunk_size] += output_chunk
            else:
                out[i : i + chunk_size] = output_chunk
        else:
            raise ValueError("Not supported")

        i += chunk_size

    out = tensor_tree_map(lambda t: t.view(orig_batch_dims + t.shape[1:]), out)

    return out
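
# Illustrative usage (added sketch, not part of the original module): applying a
# callable over a [16, 64, 128] input in chunks of 4 along the flattened batch
# dimension (here no_batch_dims=1, so the 16 rows are processed 4 at a time).
# `my_module` is a hypothetical callable taking a keyword argument `x`.
#
#     out = chunk_layer(
#         layer=my_module,
#         inputs={"x": torch.randn(16, 64, 128)},
#         chunk_size=4,
#         no_batch_dims=1,
#     )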
class ChunkSizeTuner:
    def __init__(
        self,
        # Heuristically, runtimes for most of the modules in the network
        # plateau earlier than this on all GPUs I've run the model on.
        max_chunk_size: int = 512,
    ):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size: Optional[int] = None
        self.cached_arg_data: Optional[tuple] = None

    def _determine_favorable_chunk_size(self, fn: Callable, args: tuple, min_chunk_size: int) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates: List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size: int) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1: Iterable, ac2: Iterable) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2

        return consistent

    def tune_chunk_size(
        self,
        representative_fn: Callable,
        args: tuple,
        min_chunk_size: int,
    ) -> int:
        consistent = True
        arg_data: tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn,
                args,
                min_chunk_size,
            )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None

        return self.cached_chunk_size
| 711
|
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # Fundamental transformation that is performed on every pixel value.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
| 45
| 0
|
import os
import pytest
from attr import dataclass
os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}
    @property
    def metric_definitions(self):
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]
    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"

    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self):
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 45
| 0
|
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
logger = logging.get_logger(__name__)
class HyperParamSearchBackendBase:
    name: str
    pip_package: str = None

    @staticmethod
    def is_available():
        raise NotImplementedError

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        raise NotImplementedError

    def default_hp_space(self, trial):
        raise NotImplementedError

    def ensure_available(self):
        if not self.is_available():
            raise RuntimeError(
                f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
            )

    @classmethod
    def pip_install(cls):
        return f"`pip install {cls.pip_package or cls.name}`"


class OptunaBackend(HyperParamSearchBackendBase):
    name = "optuna"

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)


class RayTuneBackend(HyperParamSearchBackendBase):
    name = "ray"
    pip_package = "'ray[tune]'"

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)


class SigOptBackend(HyperParamSearchBackendBase):
    name = "sigopt"

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)


class WandbBackend(HyperParamSearchBackendBase):
    name = "wandb"

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def default_hp_search_backend() -> str:
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                f"{len(available_backends)} hyperparameter search backends available. Using {name} as the default."
            )
        return name
    raise RuntimeError(
        "No hyperparameter search backend available.\n"
        + "\n".join(
            f" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
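
# Illustrative usage (added sketch, not part of the original module): these
# backends are consumed by `Trainer.hyperparameter_search`, e.g.
#
#     best_run = trainer.hyperparameter_search(
#         direction="minimize",
#         backend="optuna",  # any key of ALL_HYPERPARAMETER_SEARCH_BACKENDS
#         n_trials=10,
#     )
#
# When `backend` is omitted, `default_hp_search_backend()` above picks the
# first installed backend.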
| 713
|
import math


def res(x, y):
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    x1, y1 = map(int, input(prompt).split(","))
    x2, y2 = map(int, input(prompt).split(","))

    # We find the log of each number, using the function res(), which takes two
    # arguments.
    res1 = res(x1, y1)
    res2 = res(x2, y2)

    # We check for the largest number
    if res1 > res2:
        print("Largest number is", x1, "^", y1)
    elif res2 > res1:
        print("Largest number is", x2, "^", y2)
    else:
        print("Both are equal")
| 45
| 0
|
from math import sqrt


def solution(limit: int = 1000000) -> int:
    """
    Return the least value of M such that the number of cuboids M x b x c
    (with b, c <= M) whose shortest surface path has integer length first
    exceeds `limit` (Project Euler problem 86).
    """
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
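
# Added context from the Project Euler 86 statement: exactly 1975 distinct
# cuboids have an integer shortest path when M = 99, and 2060 when M = 100 --
# so, for example, solution(1975) should return 100.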
if __name__ == "__main__":
print(f'''{solution() = }''')
| 714
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 cells surrounding cell (i, j)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
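

if __name__ == "__main__":
    # Illustrative usage (added example, not part of the original file): with
    # 8-directional connectivity, the two corner cells below form two islands.
    example_grid = [
        [1, 0, 0],
        [0, 0, 0],
        [0, 0, 1],
    ]
    print(Graph(3, 3, example_grid).count_islands())  # expected output: 2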
| 45
| 0
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
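

# A minimal usage sketch (hedged): diffusers community pipelines of this shape
# are usually loaded through `custom_pipeline`; the registered pipeline name and
# the model id below are illustrative assumptions, not confirmed by this file.
#
#   import torch
#   from diffusers import DiffusionPipeline
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5",
#       custom_pipeline="seed_resize_stable_diffusion",  # hypothetical name
#       torch_dtype=torch.float16,
#   ).to("cuda")
#   image = pipe("a photo of an astronaut riding a horse", height=512, width=768).images[0]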
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    # Convert a positive integer to its representation in any base from 2 to 36.
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are written with letters (10 -> "A", ..., 35 -> "Z")
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
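    # Worked example: 255 in base 16 is "FF" (a remainder of 15 maps to "F"
    # through ALPHABET_VALUES).
    print(decimal_to_any(255, 16))  # FF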
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
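

# A minimal usage sketch (assuming the upstream `datasets` behaviour; the label
# names below are illustrative):
#
#   from datasets import ClassLabel, Features, Image
#
#   features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#   task = ImageClassification(image_column="image", label_column="labels")
#   aligned = task.align_with_features(features)  # copies the concrete ClassLabel into label_schema
#   aligned.column_mapping                        # {"image": "image", "labels": "labels"}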
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # A small test to make sure that inference works in half precision without any problem.
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
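
# With the lazy module in place, `import transformers.onnx` stays cheap: the
# submodules listed in `_import_structure` are only imported when one of their
# attributes is first accessed, e.g.:
#
#   import transformers.onnx        # no heavy submodule imports yet
#   transformers.onnx.OnnxConfig    # first access triggers `from .config import OnnxConfig`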
def stooge_sort(arr: list[int]) -> list[int]:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list[int], i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
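
# Stooge sort is a deliberately inefficient teaching algorithm: the recurrence
# T(n) = 3 * T(2n / 3) + O(1) gives a running time of O(n ** (log 3 / log 1.5)),
# roughly O(n ** 2.71). For example, stooge_sort([2, 4, 5, 3, 1]) returns
# [1, 2, 3, 4, 5].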
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    # helper function to spherically interpolate two arrays v0 and v1
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()

    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1

    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)

    return v2
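
# Worked example (not from the original file): for two orthogonal unit vectors
# v0 and v1, slerp(0.5, v0, v1) walks half of the 90-degree arc and returns
# (v0 + v1) / sqrt(2), which is again a unit vector; plain linear interpolation
# would return the shorter, non-unit vector (v0 + v1) / 2.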
def spherical_dist_loss(x, y):
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
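
# Worked example (not from the original file): since |x - y| = 2 * sin(theta / 2)
# for unit vectors at angle theta, the expression above equals theta**2 / 2.
# Orthogonal embeddings therefore give 2 * arcsin(sqrt(2) / 2) ** 2 = pi**2 / 8,
# approximately 1.2337.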
def set_requires_grad(model, value):
    for param in model.parameters():
        param.requires_grad = value
class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline):
    # NOTE: the class name is an assumption; it follows the shape of the
    # diffusers "CLIP guided images mixing" community pipeline, the original
    # name having been lost in this dump.
    def __init__(self, vae, text_encoder, clip_model, tokenizer, unet, scheduler, feature_extractor, coca_model=None, coca_tokenizer=None, coca_transform=None):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            clip_model=clip_model,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
            coca_model=coca_model,
            coca_tokenizer=coca_tokenizer,
            coca_transform=coca_transform,
        )
        self.feature_extractor_size = (
            feature_extractor.size
            if isinstance(feature_extractor.size, int)
            else feature_extractor.size["shortest_edge"]
        )
        self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
        set_requires_grad(self.text_encoder, False)
        set_requires_grad(self.clip_model, False)
    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    def freeze_vae(self):
        set_requires_grad(self.vae, False)

    def unfreeze_vae(self):
        set_requires_grad(self.vae, True)

    def freeze_unet(self):
        set_requires_grad(self.unet, False)

    def unfreeze_unet(self):
        set_requires_grad(self.unet, True)
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]

        return timesteps, num_inference_steps - t_start
    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, torch.Tensor):
            raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")

        image = image.to(device=device, dtype=dtype)

        if isinstance(generator, list):
            init_latents = [
                self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
            ]
            init_latents = torch.cat(init_latents, dim=0)
        else:
            init_latents = self.vae.encode(image).latent_dist.sample(generator)

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        init_latents = 0.18215 * init_latents
        init_latents = init_latents.repeat_interleave(batch_size, dim=0)

        noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)

        # get latents
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents
    def get_image_description(self, image):
        transformed_image = self.coca_transform(image).unsqueeze(0)
        with torch.no_grad(), torch.cuda.amp.autocast():
            generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
        generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
        return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
    def get_clip_image_embeddings(self, image, batch_size):
        clip_image_input = self.feature_extractor.preprocess(image)
        clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
        image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
        image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
        return image_embeddings_clip
    @torch.enable_grad()
    def cond_fn(self, latents, timestep, index, text_embeddings, noise_pred_original, original_image_embeddings_clip, clip_guidance_scale):
        latents = latents.detach().requires_grad_()

        latent_model_input = self.scheduler.scale_model_input(latents, timestep)

        # predict the noise residual
        noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample

        if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
            alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
            beta_prod_t = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            pred_original_sample = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5

            fac = torch.sqrt(beta_prod_t)
            sample = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler, LMSDiscreteScheduler):
            sigma = self.scheduler.sigmas[index]
            sample = latents - sigma * noise_pred
        else:
            raise ValueError(f"scheduler type {type(self.scheduler)} not supported")

        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        sample = 1 / 0.18215 * sample
        image = self.vae.decode(sample).sample
        image = (image / 2 + 0.5).clamp(0, 1)

        image = transforms.Resize(self.feature_extractor_size)(image)
        image = self.normalize(image).to(latents.dtype)

        image_embeddings_clip = self.clip_model.get_image_features(image)
        image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)

        loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale

        grads = -torch.autograd.grad(loss, latents)[0]

        if isinstance(self.scheduler, LMSDiscreteScheduler):
            latents = latents.detach() + grads * (sigma**2)
            noise_pred = noise_pred_original
        else:
            noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
        return noise_pred, latents
@torch.no_grad()
def __call__( self : int , a : int , a : Optional[int] , a : List[Any] = None , a : str = None , a : List[str] = 512 , a : Tuple = 512 , a : List[str] = 0.6 , a : List[str] = 50 , a : Optional[Any] = 7.5 , a : Dict = 1 , a : Optional[Any] = 0.0 , a : Any = 100 , a : Optional[int] = None , a : Union[str, Any] = "pil" , a : int = True , a : Optional[Any] = 0.8 , a : int = 0.1 , a : Union[str, Any] = 0.1 , )-> Optional[Any]:
"""simple docstring"""
if isinstance(a , a ) and len(a ) != batch_size:
raise ValueError(f"""You have passed {batch_size} batch_size, but only {len(a )} generators.""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if isinstance(a , torch.Generator ) and batch_size > 1:
lowercase__ = [generator] + [None] * (batch_size - 1)
lowercase__ = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
lowercase__ = [x[0] for x in coca_is_none if x[1]]
lowercase__ = ''', '''.join(a )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(a ):
raise ValueError(
f"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
f"""Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ = self.get_image_description(a )
if style_prompt is None:
if len(a ):
raise ValueError(
f"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
f""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""" )
lowercase__ = self.get_image_description(a )
# get prompt text embeddings for content and style
lowercase__ = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowercase__ = self.tokenizer(
a , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=a , return_tensors='pt' , )
lowercase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowercase__ = slerp(a , a , a )
# duplicate text embeddings for each generation per prompt
lowercase__ = text_embeddings.repeat_interleave(a , dim=0 )
# set timesteps
lowercase__ = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowercase__ = {}
if accepts_offset:
lowercase__ = 1
self.scheduler.set_timesteps(a , **a )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
lowercase__ = self.get_timesteps(a , a , self.device )
lowercase__ = timesteps[:1].repeat(a )
# Preprocess image
lowercase__ = preprocess(a , a , a )
lowercase__ = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ = preprocess(a , a , a )
lowercase__ = self.prepare_latents(
a , a , a , text_embeddings.dtype , self.device , a )
lowercase__ = slerp(a , a , a )
if clip_guidance_scale > 0:
lowercase__ = self.get_clip_image_embeddings(a , a )
lowercase__ = self.get_clip_image_embeddings(a , a )
lowercase__ = slerp(
a , a , a )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowercase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase__ = content_text_input.input_ids.shape[-1]
lowercase__ = self.tokenizer([''] , padding='max_length' , max_length=a , return_tensors='pt' )
lowercase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowercase__ = uncond_embeddings.repeat_interleave(a , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowercase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowercase__ = torch.randn(a , generator=a , device='cpu' , dtype=a ).to(
self.device )
else:
lowercase__ = torch.randn(a , generator=a , device=self.device , dtype=a )
else:
if latents.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
lowercase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowercase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase__ = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowercase__ = {}
if accepts_eta:
lowercase__ = eta
# check if the scheduler accepts generator
lowercase__ = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowercase__ = generator
with self.progress_bar(total=a ):
for i, t in enumerate(a ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowercase__ = self.unet(a , a , encoder_hidden_states=a ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowercase__ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowercase__ = self.cond_fn(
a , a , a , a , a , a , a , )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(a , a , a , **a ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowercase__ = 1 / 0.18215 * latents
lowercase__ = self.vae.decode(a ).sample
lowercase__ = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(a )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=a , nsfw_content_detected=a )
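

# A minimal usage sketch (hedged): the pipeline name, model ids and call
# signature below are illustrative assumptions based on how diffusers community
# pipelines of this shape are typically driven, not confirmed by this file.
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-base",
#       custom_pipeline="clip_guided_images_mixing_stable_diffusion",  # hypothetical name
#       clip_model=CLIPModel.from_pretrained("openai/clip-vit-base-patch32"),
#       feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32"),
#   ).to("cuda")
#   out = pipe(content_image, style_image, num_inference_steps=50)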
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
from __future__ import annotations
def carrier_concentration(electron_conc: float, hole_conc: float, intrinsic_conc: float) -> tuple:
if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif electron_conc < 0:
raise ValueError('Electron concentration cannot be negative in a semiconductor' )
elif hole_conc < 0:
raise ValueError('Hole concentration cannot be negative in a semiconductor' )
elif intrinsic_conc < 0:
raise ValueError(
'Intrinsic concentration cannot be negative in a semiconductor' )
elif electron_conc == 0:
return (
"electron_conc",
intrinsic_conc**2 / hole_conc,
)
elif hole_conc == 0:
return (
"hole_conc",
intrinsic_conc**2 / electron_conc,
)
elif intrinsic_conc == 0:
return (
"intrinsic_conc",
(electron_conc * hole_conc) ** 0.5,
)
else:
return (-1, -1)
if __name__ == "__main__":
import doctest
doctest.testmod()
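    # Example (hypothetical values): with an intrinsic concentration of 10**10
    # and an electron concentration of 25, the mass-action law n * p = n_i**2
    # yields the missing hole concentration of 4e18.
    print(carrier_concentration(electron_conc=25, hole_conc=0, intrinsic_conc=10**10))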
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
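    # Worked example from Project Euler 114: a row of length 7 admits exactly
    # 17 arrangements, so the line below should print 17.
    print(f'''{solution(7) = }''')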
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("""9.1.0"""):
lowercase_ = {
"""linear""": PIL.Image.Resampling.BILINEAR,
"""bilinear""": PIL.Image.Resampling.BILINEAR,
"""bicubic""": PIL.Image.Resampling.BICUBIC,
"""lanczos""": PIL.Image.Resampling.LANCZOS,
"""nearest""": PIL.Image.Resampling.NEAREST,
}
else:
lowercase_ = {
"""linear""": PIL.Image.LINEAR,
"""bilinear""": PIL.Image.BILINEAR,
"""bicubic""": PIL.Image.BICUBIC,
"""lanczos""": PIL.Image.LANCZOS,
"""nearest""": PIL.Image.NEAREST,
}
def pt_to_pil(images):
    # Convert a torch image batch in [-1, 1] to a list of PIL images.
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    # Convert a numpy image batch in [0, 1] to a list of PIL images.
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
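

# A quick sketch of the expected shapes (illustrative, not from the original file):
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)  # values in [0, 1], channels-last
#   pils = numpy_to_pil(batch)            # -> list of two 64x64 RGB PIL images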
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
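
# Example invocation (script name, paths and flags illustrative):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model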
def solution() -> str:
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
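    # For reference, Project Euler 48's worked example: the first ten terms sum
    # to 1**1 + 2**2 + ... + 10**10 = 10405071317, whose last ten digits are
    # "0405071317".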
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
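
# Example invocation (script name, runner names and token illustrative):
#   python get_runner_status.py --target_runners runner-1,runner-2 --token "$GITHUB_TOKEN"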
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[Any] , a : Optional[Any] , a : str=13 , a : Optional[int]=7 , a : Dict=True , a : List[str]=True , a : Any=True , a : Optional[int]=True , a : Tuple=99 , a : List[str]=32 , a : Optional[Any]=5 , a : List[Any]=4 , a : Tuple=4 , a : Dict="gelu" , a : str=0.0 , a : int=0.1 , a : str=True , a : List[str]=512 , a : Any=16 , a : Optional[Any]=2 , a : List[Any]=0.02 , a : Tuple=3 , a : Union[str, Any]=4 , a : Optional[int]=None , )-> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_multiple_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = weight_tying
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_labels
lowercase__ = num_choices
lowercase__ = scope
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Union[str, Any] , a : List[str] , a : Tuple )-> Optional[int]:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a , attention_mask=a )
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : List[str] , a : Any , a : int )-> int:
"""simple docstring"""
lowercase__ = True
lowercase__ = GPTNeoXJapaneseModel(a )
model.to(a )
model.eval()
lowercase__ = model(a , attention_mask=a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Tuple , a : Optional[int] , a : Dict , a : str )-> int:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
lowercase__ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : str , a : Optional[int] , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = GPTNeoXJapaneseForCausalLM(config=a )
model.to(a )
model.eval()
# first forward pass
lowercase__ = model(a , attention_mask=a , use_cache=a )
lowercase__ = outputs.past_key_values
        # create hypothetical multiple next tokens and extend next_input_ids with them
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append the new tokens to input_ids and the attention mask
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ = model(a , attention_mask=a , output_hidden_states=a )
lowercase__ = output_from_no_past['hidden_states'][0]
lowercase__ = model(
a , attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Tuple:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : str = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_UpperCamelCase : Optional[int] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_UpperCamelCase : str = (
{'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : str = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : str = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = GPTNeoXJapaneseModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> int:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Dict:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase__ = None
self.model_tester.create_and_check_model_as_decoder(a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
lowercase__ = 'abeja/gpt-neox-japanese-2.7b'
lowercase__ = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
lowercase__ = [
'データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。',
'100年後に必要とされる会社は、「人」が中心の会社です。',
'フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。',
'国境の長いトンネルを抜けると、そこは雪国だった。',
'美味しい日本食といえば、やっぱりお寿司ですよね。',
]
lowercase__ = GPTNeoXJapaneseTokenizer.from_pretrained(a )
lowercase__ = GPTNeoXJapaneseForCausalLM.from_pretrained(a )
lowercase__ = []
for prompt in prompts:
lowercase__ = tokenizer(a , return_tensors='pt' ).input_ids
lowercase__ = model.generate(a , max_length=50 )
lowercase__ = tokenizer.batch_decode(a , skip_special_tokens=a )
predicted_outputs += generated_string
self.assertListEqual(a , a )
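# Minimal standalone generation sketch (a hypothetical helper, not part of the test
# suite): mirrors the slow test above with a single prompt. Downloading
# abeja/gpt-neox-japanese-2.7b is slow and requires torch; call this manually.
def _generate_example() -> str:
    tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained('abeja/gpt-neox-japanese-2.7b' )
    model = GPTNeoXJapaneseForCausalLM.from_pretrained('abeja/gpt-neox-japanese-2.7b' )
    input_ids = tokenizer('データサイエンティストとは、' , return_tensors='pt' ).input_ids
    output_ids = model.generate(input_ids , max_length=50 )
    return tokenizer.batch_decode(output_ids , skip_special_tokens=True )[0]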
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be None.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
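# Minimal usage sketch; the checkpoint name is an assumption for illustration, and any
# CLAP checkpoint with a paired tokenizer and feature extractor would work:
#
#   import numpy as np
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused' )  # assumed checkpoint
#   inputs = processor(text=['a dog barking'] , audios=[np.zeros(48_000 )] , sampling_rate=48_000 , return_tensors='pt' )
#   # `inputs` holds the tokenizer outputs plus `input_features` from the feature extractor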
| 45
| 0
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
lowercase_ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class SCREAMING_SNAKE_CASE (nn.Module ):
def __init__( self : List[str] , a : Optional[int] )-> Optional[Any]:
"""simple docstring"""
super().__init__()
        lowercase__ = torchvision.models.resnet152(pretrained=a )
lowercase__ = list(model.children() )[:-2]
lowercase__ = nn.Sequential(*a )
        lowercase__ = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Union[str, Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.pool(self.model(a ) )
lowercase__ = torch.flatten(a , start_dim=2 )
lowercase__ = out.transpose(1 , 2 ).contiguous()
return out # BxNx2048
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Any , a : str , a : str , a : Optional[Any] , a : List[Any] , a : Dict )-> Union[str, Any]:
"""simple docstring"""
        lowercase__ = [json.loads(l ) for l in open(a )]
lowercase__ = os.path.dirname(a )
lowercase__ = tokenizer
lowercase__ = labels
lowercase__ = len(a )
lowercase__ = max_seq_length
lowercase__ = transforms
def __len__( self : Dict )-> Any:
"""simple docstring"""
return len(self.data )
def __getitem__( self : Optional[Any] , a : Optional[Any] )-> Any:
"""simple docstring"""
lowercase__ = torch.LongTensor(self.tokenizer.encode(self.data[index]['text'] , add_special_tokens=a ) )
lowercase__ , lowercase__ , lowercase__ = sentence[0], sentence[1:-1], sentence[-1]
lowercase__ = sentence[: self.max_seq_length]
lowercase__ = torch.zeros(self.n_classes )
lowercase__ = 1
lowercase__ = Image.open(os.path.join(self.data_dir , self.data[index]['img'] ) ).convert('RGB' )
lowercase__ = self.transforms(a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
lowercase__ = Counter()
for row in self.data:
label_freqs.update(row['label'] )
return label_freqs
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = [len(row['sentence'] ) for row in batch]
lowercase__ , lowercase__ = len(_SCREAMING_SNAKE_CASE ), max(_SCREAMING_SNAKE_CASE )
lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=torch.long )
for i_batch, (input_row, length) in enumerate(zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ):
lowercase__ = input_row['sentence']
lowercase__ = 1
lowercase__ = torch.stack([row['image'] for row in batch] )
lowercase__ = torch.stack([row['label'] for row in batch] )
lowercase__ = torch.stack([row['image_start_token'] for row in batch] )
lowercase__ = torch.stack([row['image_end_token'] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
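# Sketch of how the collate function above is typically wired up; the argument
# values are assumptions for illustration:
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
#   text, mask, img, img_start_token, img_end_token, labels = next(iter(loader))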
def __UpperCamelCase () -> List[str]:
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def __UpperCamelCase () -> Dict:
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
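# For reference, the special-token layout produced above follows the CamemBERT/RoBERTa
# scheme: a single sequence becomes `<s> A </s>` and a pair becomes
# `<s> A </s></s> B </s>`, with token type ids of all zeros in both cases.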
| 45
| 0
|
# fmt: off
MORSE_CODE_DICT = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split() )
def main() -> None:
    message = 'Morse code here!'
    print(message )
    message = encrypt(message )
    print(message )
    message = decrypt(message )
    print(message )
if __name__ == "__main__":
main()
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
        lowercase__ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
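# Minimal out-of-test usage sketch for self-attention guidance; the device and
# scales below are assumptions for illustration:
#
#   pipe = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
#   pipe = pipe.to('cuda' )
#   image = pipe('a photo of an astronaut' , sag_scale=0.75 , guidance_scale=7.5 ).images[0]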
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""shi-labs/dinat-mini-in1k-224""": """https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json""",
# See all Dinat models at https://huggingface.co/models?filter=dinat
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase ):
_UpperCamelCase : int = 'dinat'
_UpperCamelCase : Any = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] , a : Optional[int]=4 , a : Tuple=3 , a : List[str]=64 , a : Optional[Any]=[3, 4, 6, 5] , a : Tuple=[2, 4, 8, 16] , a : Union[str, Any]=7 , a : Optional[Any]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , a : int=3.0 , a : Union[str, Any]=True , a : Any=0.0 , a : Optional[Any]=0.0 , a : int=0.1 , a : Dict="gelu" , a : List[str]=0.02 , a : List[str]=1E-5 , a : Any=0.0 , a : Optional[int]=None , a : Optional[Any]=None , **a : Union[str, Any] , )-> Optional[int]:
"""simple docstring"""
super().__init__(**a )
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = depths
lowercase__ = len(a )
lowercase__ = num_heads
lowercase__ = kernel_size
lowercase__ = dilations
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
# we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
lowercase__ = int(embed_dim * 2 ** (len(a ) - 1) )
lowercase__ = layer_scale_init_value
lowercase__ = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(a ) + 1 )]
lowercase__ , lowercase__ = get_aligned_output_features_output_indices(
out_features=a , out_indices=a , stage_names=self.stage_names )
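# Minimal sketch, assuming the usual DinatConfig entry point for this configuration
# class: the derived attributes set at the end of __init__ can be checked directly.
#
#   config = DinatConfig()
#   assert config.hidden_size == int(config.embed_dim * 2 ** (len(config.depths) - 1))
#   # config.stage_names -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']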
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
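# Sketch of what the ONNX configuration above enables (output path assumed):
#
#   python -m transformers.onnx --model=facebook/deit-base-distilled-patch16-224 onnx/
#
# The exporter reads the dynamic-axes mapping and the 1e-4 validation tolerance
# defined in the two properties above.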
| 45
| 0
|
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
lowercase_ = """\
Text data.
Second line of data."""
lowercase_ = """file"""
@pytest.fixture(scope='session' )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
lowercase__ = bytes(_SCREAMING_SNAKE_CASE , 'utf-8' )
with zstd.open(_SCREAMING_SNAKE_CASE , 'wb' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return path
@pytest.fixture
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
with open(os.path.join(tmpfs.local_root_dir , _SCREAMING_SNAKE_CASE ) , 'w' ) as f:
f.write(_SCREAMING_SNAKE_CASE )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
lowercase__ = input_paths[compression_format]
lowercase__ = tmp_path / 'cache'
lowercase__ = DownloadConfig(cache_dir=_SCREAMING_SNAKE_CASE , extract_compressed_file=_SCREAMING_SNAKE_CASE )
lowercase__ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read()
with open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = 'custom_cache'
lowercase__ = 'custom_extracted_dir'
lowercase__ = tmp_path / 'custom_extracted_path'
if default_extracted:
lowercase__ = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , _SCREAMING_SNAKE_CASE )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) )
lowercase__ = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase__ = xz_file
lowercase__ = (
DownloadConfig(extract_compressed_file=_SCREAMING_SNAKE_CASE )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_SCREAMING_SNAKE_CASE )
)
lowercase__ = cached_path(_SCREAMING_SNAKE_CASE , download_config=_SCREAMING_SNAKE_CASE )
assert Path(_SCREAMING_SNAKE_CASE ).parent.parts[-2:] == expected
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
# absolute path
lowercase__ = str(Path(_SCREAMING_SNAKE_CASE ).resolve() )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
# relative path
lowercase__ = str(Path(_SCREAMING_SNAKE_CASE ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_SCREAMING_SNAKE_CASE ) == text_file
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# absolute path
lowercase__ = str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
# relative path
lowercase__ = './__missing_file__.txt'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = get_from_cache(F"""tmp://{tmpfs_file}""" )
with open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase () -> List[Any]:
with pytest.raises(_SCREAMING_SNAKE_CASE ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_get('https://huggingface.co' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_get('ftp://huggingface.co' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
lowercase__ = tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_get('s3://huggingface.co' , temp_file=_SCREAMING_SNAKE_CASE )
with pytest.raises(_SCREAMING_SNAKE_CASE ):
fsspec_head('s3://huggingface.co' )
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
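# Typical call for the wrapper above (assuming it is exposed as
# get_last_daily_ci_reports; artifact names and output directory are hypothetical,
# and the token needs actions:read permission):
#
#   results = get_last_daily_ci_reports(['ci_results', 'test_failure_tables'], 'ci_reports', token)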
| 45
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = os.path.join(args.tf_model_dir , 'parameters.json' )
lowercase__ = json.loads(open(_SCREAMING_SNAKE_CASE ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('.pt' ):
lowercase__ = args.output + '.pt'
lowercase__ = OrderedDict()
with tf.device('/CPU:0' ):
lowercase__ = tf.train.load_checkpoint(args.tf_model_dir )
lowercase__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
            lowercase__ = reader.get_tensor(key_name ).astype(np.float16 )  # dtype assumed: the original literal was garbled and could also be float32
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
lowercase__ = int(key_name[9] )
elif key_name.startswith('pasts/out' ):
lowercase__ = 8
                lowercase__ = 'model.sqout.%d.weight' % (player * 2) # feeds into an nn.Sequential with Tanh, so 2 entries at a time
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/moe' ):
lowercase__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/softmlp/kernel' ):
lowercase__ = 'model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
lowercase__ = key_name[-9:-7]
for i in range(16 ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
lowercase__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/mlp' ):
lowercase__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.wi.weight' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/p1/bias' ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.wi.bias' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/p2/kernel' ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.wo.weight' % player
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/p2/bias' ):
lowercase__ = 'model.blocks.%d.feed_forward.mlp.wo.bias' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/ln' ):
lowercase__ = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
lowercase__ = 'model.blocks.%d.feed_forward.norm.bias' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/g' ):
lowercase__ = 'model.blocks.%d.feed_forward.norm.weight' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/att' ):
lowercase__ = int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
lowercase__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowercase__ = state[:, 0, :, :]
lowercase__ = state[:, 1, :, :]
lowercase__ = state[:, 2, :, :]
lowercase__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = 'model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
lowercase__ = 'model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
lowercase__ = 'model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/o/kernel' ):
lowercase__ = 'model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
lowercase__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/an' ):
lowercase__ = int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
lowercase__ = 'model.blocks.%d.self_attn.norm.bias' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.endswith('/g' ):
lowercase__ = 'model.blocks.%d.self_attn.norm.weight' % player
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
lowercase__ = {'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
lowercase__ = 'model.%s.weight' % nlayer
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
if key_name.startswith('model/wte' ):
lowercase__ = 'lm_head.weight'
lowercase__ = vnp.copy() # same in embedded
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name.startswith('model/wob' ):
lowercase__ = 'final_logits_bias'
lowercase__ = vnp.copy() # same in embedded
lowercase__ = state.reshape((1, -1) )
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name == "model/dense/kernel":
lowercase__ = 'model.last_project.weight'
lowercase__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
elif key_name == "model/dense_1/bias":
lowercase__ = 'model.last_project.bias'
lowercase__ = vnp.copy() # same because it is one dimensional
lowercase__ = torch.tensor(_SCREAMING_SNAKE_CASE )
torch.save(_SCREAMING_SNAKE_CASE , args.output )
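# Examples of the key translation performed above (derived from the branches, not
# exhaustive):
#   model/att5/qkv/kernel -> model.blocks.5.self_attn.self_attn.{q,k,v}_proj.weight
#   model/mlp3/p1/kernel  -> model.blocks.3.feed_forward.mlp.wi.weight
#   model/wte             -> model.embed_tokens.weight (also copied to lm_head.weight)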
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
lowercase_ = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
            lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.float16 )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
        lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.float16 )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Any:
lowercase__ = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ , lowercase__ = emb.weight.shape
lowercase__ = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
lowercase__ = emb.weight.data
return lin_layer
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> int:
lowercase__ = {}
for old_key in state_dict.keys():
lowercase__ = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
lowercase__ = key.replace('moe_layer.experts.0' , F"""ffn.experts.expert_{expert_idx}""" )
else:
lowercase__ = key.replace('moe_layer.experts.' , 'ffn.experts.expert_' )
if "gate" in key:
lowercase__ = key.replace('.moe_layer.gate.wg' , '.ffn.router.classifier' )
if "fc2" and "experts" not in key:
lowercase__ = key.replace('.fc2.' , '.ffn.fc2.' )
if "fc1" and "experts" not in key:
lowercase__ = key.replace('.fc1.' , '.ffn.fc1.' )
if ".encoder_attn." in key:
lowercase__ = key.replace('.encoder_attn.' , '.cross_attention.' )
if "encoder_attn_layer_norm" in key:
lowercase__ = key.replace('encoder_attn_layer_norm' , 'cross_attention_layer_norm' )
if "final_layer_norm" in key:
lowercase__ = key.replace('final_layer_norm' , 'ff_layer_norm' )
lowercase__ = state_dict[old_key]
return new_dict
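# Example of the renaming above: with expert_idx=3,
#   'layers.1.moe_layer.experts.0.fc1.weight' -> 'layers.1.ffn.experts.expert_3.fc1.weight'
# The fc1/fc2 renames are skipped for expert weights, as encoded in the guards above.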
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = WEIGHTS_NAME ) -> List[Any]:
lowercase__ = []
lowercase__ = 0
os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE )
for expert in range(_SCREAMING_SNAKE_CASE ):
lowercase__ = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = torch.load(_SCREAMING_SNAKE_CASE )['model']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
lowercase__ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = os.path.join(
_SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_SCREAMING_SNAKE_CASE )[0]].dtype )
# Add the last block
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{len(_SCREAMING_SNAKE_CASE )+1:05d}-of-???.bin""" ) )
lowercase__ = torch.load(switch_checkpoint_path + '-shared.pt' )['model']
remove_ignore_keys_(_SCREAMING_SNAKE_CASE )
lowercase__ = rename_fairseq_keys(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = shared_weights['decoder.embed_tokens.weight']
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_SCREAMING_SNAKE_CASE ) == 1:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Otherwise, let's build the index
lowercase__ = {}
for idx, shard in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = weights_name.replace('.bin' , F"""-{idx+1:05d}-of-{len(_SCREAMING_SNAKE_CASE ):05d}.bin""" )
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , weights_name.replace('.bin' , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
for key in shard:
lowercase__ = shard_file
# Add the metadata
lowercase__ = {'total_size': total_size}
lowercase__ = {'metadata': metadata, 'weight_map': weight_map}
with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , 'w' , encoding='utf-8' ) as f:
lowercase__ = json.dumps(_SCREAMING_SNAKE_CASE , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE ) + '\n'
f.write(_SCREAMING_SNAKE_CASE )
return metadata, index
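# The returned index mirrors the standard sharded-checkpoint layout, e.g.:
#   {"metadata": {"total_size": ...},
#    "weight_map": {"decoder.embed_tokens.weight": "pytorch_model-00002-of-....bin", ...}}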
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--nllb_moe_checkpoint_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
lowercase_ = parser.parse_args()
lowercase_ , lowercase_ = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
lowercase_ = NllbMoeConfig.from_pretrained(
"""facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
lowercase_ = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("""Done""")
model.save_pretrained(args.pytorch_dump_folder_path)
| 706
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    if len(series ) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series ) - 1 ):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    if not isinstance(series , list ):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
    if len(series ) == 0:
        raise ValueError('Input list must be a non empty list' )
    answer = 0
    for val in series:
        answer += val
    return answer / len(series )
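# Hand-checked examples for the two helpers above:
#   is_arithmetic_series([2, 4, 6])  -> True   (common difference 2)
#   is_arithmetic_series([3, 6, 12]) -> False  (differences 3 and 6)
#   arithmetic_mean([2, 4, 6])       -> 4.0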
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    if not isinstance(precision , int ):
        raise TypeError('Undefined for non-integers' )
    elif precision < 1:
        raise ValueError('Undefined for non-natural numbers' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
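# Sanity-check sketch: with ten digits of working precision the result matches the
# known expansion of pi (the final digit is trimmed by the [:-1] guard above):
#   pi(10) -> '3.14159265'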
if __name__ == "__main__":
lowercase_ = 50
print(f'''The first {n} digits of pi is: {pi(n)}''')
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float] , x_start: float , x_end: float , steps: int = 100 ) -> float:
    x1 = x_start
    fx1 = fnc(x_start )
    length = 0.0
    for _ in range(steps ):
        # Approximate the curve as a sequence of straight line segments and sum their lengths
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2 )
        length += math.hypot(x2 - x1 , fx2 - fx1 )
        # Increment step
        x1 = x2
        fx1 = fx2
    return length
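# Degenerate sanity check: for the straight line f(x) = x from 0 to 1 the exact
# length is sqrt(2), and the piecewise-linear approximation recovers it (up to
# floating-point rounding) for any number of steps:
#   line_length(lambda x: x, 0, 1, 10) -> approximately 1.41421356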
if __name__ == "__main__":
    def f(x: float) -> float:
        return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowercase_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 45
| 0
|
from math import isqrt
def is_prime(number: int) -> bool:
    return all(number % divisor != 0 for divisor in range(2 , isqrt(number ) + 1 ) )
def solution(max_prime: int = 10**6 ) -> int:
    primes_count = 0
    cube_index = 1
    prime_candidate = 7
    while prime_candidate < max_prime:
        primes_count += is_prime(prime_candidate )
        cube_index += 1
        prime_candidate += 6 * cube_index
    return primes_count
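# Worked example: below 100 the candidates are 7, 19, 37, 61 and 91; all but
# 91 (= 7 * 13) are prime, so solution(100) == 4.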
if __name__ == "__main__":
print(f'''{solution() = }''')
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
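# A minimal sketch of the lazy-import pattern this __init__ relies on:
# attribute access triggers the real submodule import. `LazyModule` and the
# mapping logic here are illustrative, not the exact transformers internals.
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import happens only once
        return value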
| 45
| 0
|
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = to_pil_image(_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ = pil_image.size
lowercase__ = pytesseract.image_to_data(_SCREAMING_SNAKE_CASE , lang=_SCREAMING_SNAKE_CASE , output_type='dict' , config=_SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = data['text'], data['left'], data['top'], data['width'], data['height']
# filter empty words and corresponding coordinates
lowercase__ = [idx for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if not word.strip()]
lowercase__ = [word for idx, word in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
lowercase__ = [coord for idx, coord in enumerate(_SCREAMING_SNAKE_CASE ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
lowercase__ = []
for x, y, w, h in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = [x, y, x + w, y + h]
actual_boxes.append(_SCREAMING_SNAKE_CASE )
# finally, normalize the bounding boxes
lowercase__ = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
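# A small worked example of the 0-1000 normalisation used above; the helper
# name `normalize_box` mirrors the call a few lines up and the numbers are
# illustrative.
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

# normalize_box([20, 10, 100, 50], width=200, height=100) == [100, 100, 500, 500]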
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[str] = ['pixel_values']
def __init__( self : List[str] , a : bool = True , a : Dict[str, int] = None , a : PILImageResampling = PILImageResampling.BILINEAR , a : bool = True , a : float = 1 / 255 , a : bool = True , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = True , a : Optional[str] = None , a : Optional[str] = "" , **a : Any , )-> None:
"""simple docstring"""
super().__init__(**a )
lowercase__ = size if size is not None else {'height': 224, 'width': 224}
lowercase__ = get_size_dict(a )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_rescale
lowercase__ = rescale_value
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase__ = apply_ocr
lowercase__ = ocr_lang
lowercase__ = tesseract_config
def SCREAMING_SNAKE_CASE_ ( self : str , a : np.ndarray , a : Dict[str, int] , a : PILImageResampling = PILImageResampling.BILINEAR , a : Optional[Union[str, ChannelDimension]] = None , **a : List[Any] , )-> np.ndarray:
"""simple docstring"""
lowercase__ = get_size_dict(a )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
lowercase__ = (size['height'], size['width'])
return resize(a , size=a , resample=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : np.ndarray , a : Union[int, float] , a : Optional[Union[str, ChannelDimension]] = None , **a : Union[str, Any] , )-> np.ndarray:
"""simple docstring"""
return rescale(a , scale=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : np.ndarray , a : Union[float, Iterable[float]] , a : Union[float, Iterable[float]] , a : Optional[Union[str, ChannelDimension]] = None , **a : Tuple , )-> np.ndarray:
"""simple docstring"""
return normalize(a , mean=a , std=a , data_format=a , **a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : ImageInput , a : bool = None , a : Dict[str, int] = None , a : List[str]=None , a : bool = None , a : float = None , a : bool = None , a : Union[float, Iterable[float]] = None , a : Union[float, Iterable[float]] = None , a : bool = None , a : Optional[str] = None , a : Optional[str] = None , a : Optional[Union[str, TensorType]] = None , a : ChannelDimension = ChannelDimension.FIRST , **a : int , )-> PIL.Image.Image:
"""simple docstring"""
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(a )
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase__ = ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase__ = tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase__ = make_list_of_images(a )
if not valid_images(a ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('If do_normalize is True, image_mean and image_std must be specified.' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(a ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , 'pytesseract' )
lowercase__ = []
lowercase__ = []
for image in images:
lowercase__ , lowercase__ = apply_tesseract(a , a , a )
words_batch.append(a )
boxes_batch.append(a )
if do_resize:
lowercase__ = [self.resize(image=a , size=a , resample=a ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=a , scale=a ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=a , mean=a , std=a ) for image in images]
lowercase__ = [to_channel_dimension_format(a , a ) for image in images]
lowercase__ = BatchFeature(data={'pixel_values': images} , tensor_type=a )
if apply_ocr:
lowercase__ = words_batch
lowercase__ = boxes_batch
return data
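# A short usage sketch for the image processor above, written as comments
# because OCR needs the optional pytesseract backend; the instance name and
# exact class handle are illustrative.
#   processor = ImageProcessorClass(apply_ocr=True)   # the class defined above
#   features = processor(images=[pil_image], return_tensors="pt")
#   features["pixel_values"].shape  ->  (1, 3, 224, 224) with the default size
#   features["words"], features["boxes"]  ->  OCR tokens and 0-1000 boxes per image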
| 709
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
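# A readable sketch of the same count (a Project-Euler-116-style problem):
# for one tile length m, tilings of a length-n row with unit squares and
# length-m tiles satisfy f(n) = f(n - 1) + f(n - m) with f(0) = 1, and the
# all-squares tiling is excluded per colour. Names are illustrative.
def count_colour_ways(length: int = 50) -> int:
    total = 0
    for tile in (2, 3, 4):  # the three tile lengths, one colour each
        ways = [1] * (length + 1)
        for n in range(1, length + 1):
            ways[n] = ways[n - 1] + (ways[n - tile] if n >= tile else 0)
        total += ways[length] - 1  # drop the tiling that uses no tile at all
    return total

# count_colour_ways(5) == 7 + 3 + 2 == 12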
| 45
| 0
|
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = np.full((len(_SCREAMING_SNAKE_CASE ), sequence_length, 2) , _SCREAMING_SNAKE_CASE )
else:
lowercase__ = np.full((len(_SCREAMING_SNAKE_CASE ), sequence_length) , _SCREAMING_SNAKE_CASE )
for i, tensor in enumerate(_SCREAMING_SNAKE_CASE ):
if padding_side == "right":
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = tensor[:sequence_length]
else:
lowercase__ = tensor[:sequence_length]
else:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = tensor[:sequence_length]
else:
lowercase__ = tensor[:sequence_length]
return out_tensor.tolist()
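# A readable sketch of fixed-length padding in the spirit of the helper
# above, assuming NumPy; the name `pad_to_length` and the exact placement of
# left-padded values are illustrative assumptions.
import numpy as np

def pad_to_length(sequences, padding_value, padding_side, sequence_length):
    out = np.full((len(sequences), sequence_length), padding_value)
    for i, seq in enumerate(sequences):
        seq = np.asarray(seq)[:sequence_length]  # truncate overly long rows
        if padding_side == "right":
            out[i, : len(seq)] = seq
        else:
            out[i, sequence_length - len(seq) :] = seq
    return out.tolist()

# pad_to_length([[1, 2], [3]], -100, "right", 3) == [[1, 2, -100], [3, -100, -100]]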
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = ord(_SCREAMING_SNAKE_CASE )
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
lowercase__ = unicodedata.category(_SCREAMING_SNAKE_CASE )
if cat.startswith('P' ):
return True
return False
@dataclass
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
'''simple docstring'''
_UpperCamelCase : PreTrainedTokenizerBase
_UpperCamelCase : Union[bool, str, PaddingStrategy] = True
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : Optional[int] = None
_UpperCamelCase : int = -1_00
_UpperCamelCase : str = "pt"
def SCREAMING_SNAKE_CASE_ ( self : str , a : List[str] )-> str:
"""simple docstring"""
import torch
lowercase__ = 'label' if 'label' in features[0].keys() else 'labels'
lowercase__ = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
lowercase__ = self.tokenizer.pad(
a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' if labels is None else None , )
if labels is None:
return batch
lowercase__ = torch.tensor(batch['entity_ids'] ).shape[1]
lowercase__ = self.tokenizer.padding_side
if padding_side == "right":
lowercase__ = [
list(a ) + [self.label_pad_token_id] * (sequence_length - len(a )) for label in labels
]
else:
lowercase__ = [
[self.label_pad_token_id] * (sequence_length - len(a )) + list(a ) for label in labels
]
lowercase__ = [feature['ner_tags'] for feature in features]
lowercase__ = padding_tensor(a , -1 , a , a )
lowercase__ = [feature['original_entity_spans'] for feature in features]
lowercase__ = padding_tensor(a , (-1, -1) , a , a )
lowercase__ = {k: torch.tensor(a , dtype=torch.intaa ) for k, v in batch.items()}
return batch
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
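# A minimal sketch of the guidance step at the heart of the sampling loop
# above: nudge the sample along the gradient of the learned value before
# denoising. The `value_function` call signature and the scaling here are
# illustrative assumptions, not the exact diffusers API.
import torch

def guided_step(x, t, value_function, posterior_variance, scale=0.1):
    with torch.enable_grad():
        x = x.detach().requires_grad_()
        value = value_function(x, t).sum()       # scalar objective to ascend
        grad = torch.autograd.grad(value, x)[0]  # d(value) / d(x)
    std = torch.exp(0.5 * torch.as_tensor(posterior_variance))  # posterior std
    return (x + scale * std * grad).detach()     # gradient-ascent nudge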
| 45
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 711
|
from PIL import Image
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Image:
def brightness(_SCREAMING_SNAKE_CASE ) -> float:
return 128 + level + (c - 128)
if not -2_5_5.0 <= level <= 2_5_5.0:
raise ValueError('level must be between -255.0 (black) and 255.0 (white)' )
return img.point(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
lowercase_ = change_brightness(img, 100)
lowercase_.save("""image_data/lena_brightness.png""", format="""png""")
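# A self-contained sketch of the same per-pixel shift, assuming Pillow; the
# gradient test image makes the example runnable without lena.jpg.
from PIL import Image

def shift_brightness(img: Image.Image, level: float) -> Image.Image:
    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    # clamp explicitly so the lookup table stays in the valid 8-bit range
    return img.point(lambda c: max(0, min(255, int(c + level))))

demo = Image.linear_gradient("L")  # 256x256 grayscale ramp
brighter = shift_brightness(demo, 100)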
| 45
| 0
|
from collections.abc import Sequence
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = None ) -> int:
if nums is None or not nums:
raise ValueError('Input sequence should not be empty' )
lowercase__ = nums[0]
for i in range(1 , len(_SCREAMING_SNAKE_CASE ) ):
lowercase__ = nums[i]
lowercase__ = max(_SCREAMING_SNAKE_CASE , ans + num , _SCREAMING_SNAKE_CASE )
return ans
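# A short sketch of the same scan in classic Kadane form, assuming the name
# `kadane_max_sum`; it keeps the best sum of a run ending at each position.
def kadane_max_sum(nums):
    best = current = nums[0]
    for num in nums[1:]:
        current = max(num, current + num)  # extend the run or restart at num
        best = max(best, current)
    return best

# kadane_max_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6, from the run [4, -1, 2, 1]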
if __name__ == "__main__":
import doctest
doctest.testmod()
# Try on a sample input from the user
lowercase_ = int(input("""Enter number of elements : """).strip())
lowercase_ = list(map(int, input("""\nEnter the numbers : """).strip().split()))[:n]
print(max_subsequence_sum(array))
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
import unittest
from knapsack import greedy_knapsack as kp
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
"""simple docstring"""
lowercase__ = [10, 20, 30, 40, 50, 60]
lowercase__ = [2, 4, 6, 8, 10, 12]
lowercase__ = 100
self.assertEqual(kp.calc_profit(a , a , a ) , 210 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
self.assertRaisesRegex(a , 'Weight can not be negative.' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
self.assertRaisesRegex(a , 'Profit can not be negative.' )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
self.assertRaisesRegex(a , 'max_weight must greater than zero.' )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Union[str, Any]:
"""simple docstring"""
self.assertRaisesRegex(
a , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 713
|
import math
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[Any]:
if 0 not in (x, y):
# We use the relation x^y = y*log10(x), where 10 is the base.
return y * math.logaa(_SCREAMING_SNAKE_CASE )
else:
if x == 0: # 0 raised to any number is 0
return 0
elif y == 0:
return 1 # any number raised to 0 is 1
raise AssertionError('This should never happen' )
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 714
|
class SCREAMING_SNAKE_CASE : # Public class to implement a graph
def __init__( self : int , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = row
lowercase__ = col
lowercase__ = graph
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int , a : list[list[bool]] )-> bool:
"""simple docstring"""
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : list[list[bool]] )-> None:
"""simple docstring"""
lowercase__ = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order
lowercase__ = [-1, 0, 1, -1, 1, -1, 0, 1]
lowercase__ = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , a ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int: # And finally, count all islands.
"""simple docstring"""
lowercase__ = [[False for j in range(self.COL )] for i in range(self.ROW )]
lowercase__ = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(a , a , a )
count += 1
return count
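# A compact sketch of the same 8-directional flood fill with an explicit
# stack instead of recursion; `count_islands` is an illustrative name.
def count_islands(grid: list[list[int]]) -> int:
    rows, cols = len(grid), len(grid[0])
    seen = [[False] * cols for _ in range(rows)]
    count = 0
    for si in range(rows):
        for sj in range(cols):
            if grid[si][sj] and not seen[si][sj]:
                count += 1
                stack = [(si, sj)]
                seen[si][sj] = True
                while stack:
                    i, j = stack.pop()
                    for di in (-1, 0, 1):
                        for dj in (-1, 0, 1):
                            ni, nj = i + di, j + dj
                            if 0 <= ni < rows and 0 <= nj < cols and grid[ni][nj] and not seen[ni][nj]:
                                seen[ni][nj] = True
                                stack.append((ni, nj))
    return count

# count_islands([[1, 0, 0], [0, 1, 0], [0, 0, 1]]) == 1, since diagonals connect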
| 45
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 715
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('int() can\'t convert non-string with explicit base' )
if num < 0:
raise ValueError('parameter must be positive int' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'str\' object cannot be interpreted as an integer' )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise TypeError('\'float\' object cannot be interpreted as an integer' )
if base in (0, 1):
raise ValueError('base must be >= 2' )
if base > 36:
raise ValueError('base must be <= 36' )
lowercase__ = ''
lowercase__ = 0
lowercase__ = 0
while div != 1:
lowercase__ , lowercase__ = divmod(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if base >= 11 and 9 < mod < 36:
lowercase__ = ALPHABET_VALUES[str(_SCREAMING_SNAKE_CASE )]
else:
lowercase__ = str(_SCREAMING_SNAKE_CASE )
new_value += actual_value
lowercase__ = num // base
lowercase__ = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(_SCREAMING_SNAKE_CASE )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
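# A compact sketch of the same conversion via divmod, assuming the digit
# alphabet 0-9 then A-Z; `to_base` is an illustrative name.
from string import ascii_uppercase

DIGITS = "0123456789" + ascii_uppercase

def to_base(num: int, base: int) -> str:
    if num < 0:
        raise ValueError("parameter must be positive int")
    if not 2 <= base <= 36:
        raise ValueError("base must be >= 2 and <= 36")
    if num == 0:
        return "0"
    out = []
    while num:
        num, mod = divmod(num, base)
        out.append(DIGITS[mod])
    return "".join(reversed(out))

# to_base(255, 16) == 'FF'; int(to_base(255, 16), 16) == 255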
| 45
| 0
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
| 45
| 0
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class SCREAMING_SNAKE_CASE :
def __init__( self : Tuple , a : int , )-> List[Any]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = 13
lowercase__ = 7
lowercase__ = 30
lowercase__ = self.seq_length + self.mem_len
lowercase__ = 15
lowercase__ = True
lowercase__ = True
lowercase__ = 99
lowercase__ = [10, 50, 80]
lowercase__ = 32
lowercase__ = 32
lowercase__ = 4
lowercase__ = 8
lowercase__ = 128
lowercase__ = 2
lowercase__ = 2
lowercase__ = None
lowercase__ = 1
lowercase__ = 0
lowercase__ = 3
lowercase__ = self.vocab_size - 1
lowercase__ = 0.01
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> str:
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , a : Union[str, Any] , a : List[str] , a : Union[str, Any] , a : Any )-> Optional[int]:
"""simple docstring"""
lowercase__ = TFTransfoXLModel(a )
lowercase__ , lowercase__ = model(a ).to_tuple()
lowercase__ = {'input_ids': input_ids_a, 'mems': mems_a}
lowercase__ , lowercase__ = model(a ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Union[str, Any] , a : Tuple , a : int , a : Tuple )-> str:
"""simple docstring"""
lowercase__ = TFTransfoXLLMHeadModel(a )
lowercase__ , lowercase__ = model(a ).to_tuple()
lowercase__ = {'input_ids': input_ids_a, 'labels': lm_labels}
lowercase__ , lowercase__ = model(a ).to_tuple()
lowercase__ , lowercase__ = model([input_ids_a, mems_a] ).to_tuple()
lowercase__ = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
lowercase__ , lowercase__ = model(a ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Optional[Any] , a : Tuple , a : int , a : Dict )-> Dict:
"""simple docstring"""
lowercase__ = TFTransfoXLForSequenceClassification(a )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
((lowercase__) , (lowercase__) , (lowercase__) , (lowercase__)) = config_and_inputs
lowercase__ = {'input_ids': input_ids_a}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : int = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
_UpperCamelCase : Tuple = () if is_tf_available() else ()
_UpperCamelCase : List[str] = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Tuple = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Any , a : Union[str, Any] , a : List[str] , a : Tuple , a : Dict )-> str:
"""simple docstring"""
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
lowercase__ = TFTransfoXLModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , d_embed=37 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Dict:
"""simple docstring"""
self.model_tester.set_seed()
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[str]:
"""simple docstring"""
self.model_tester.set_seed()
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowercase__ = model.get_output_embeddings()
assert isinstance(a , tf.keras.layers.Layer )
lowercase__ = model.get_bias()
assert name is None
else:
lowercase__ = model.get_output_embeddings()
assert x is None
lowercase__ = model.get_bias()
assert name is None
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Any:
"""simple docstring"""
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = TFTransfoXLModel.from_pretrained(a )
self.assertIsNotNone(a )
@unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[str]:
"""simple docstring"""
pass
@require_tf
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@unittest.skip('Skip test until #12651 is resolved.' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
lowercase__ = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
# fmt: off
lowercase__ = tf.convert_to_tensor([[33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase__ = [33,1_297,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,22,1_706,17,20_098,5,3_215,21,37,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,6_224,831,16_002,2,8,603,78_967,29_546,23,803,20,25,416,5,8,232,4,277,6,1_855,4_601,3,29_546,54,8,3_609,5,57_211,49,4,1,277,18,8,1_755,15_691,3,341,25,416,693,42_573,71,17,401,94,31,17_919,2,29_546,7_873,18,1,435,23,11_011,755,5,5_167,3,7_983,98,84,2,29_546,3_267,8,3_609,4,1,4_865,1_075,2,6_087,71,6,346,8,5_854,3,29_546,824,1_400,1_868,2,19,160,2,311,8,5_496,2,20_920,17,25,15_097,3,24,24,0,33,1,1_857,2,1,1_009,4,1_109,11_739,4_762,358,5,25,245,28,1_110,3,13,1_041,4,24,603,490,2,71_477,20_098,104_447,2,20_961,1,2_604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase__ = model.generate(a , max_length=200 , do_sample=a )
self.assertListEqual(output_ids[0].numpy().tolist() , a )
| 717
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> List[Any]:
stooge(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) - 1 )
return arr
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
lowercase__ , lowercase__ = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
lowercase__ = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
# Recursively sort last 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , i + t , (_SCREAMING_SNAKE_CASE) )
# Recursively sort first 2/3 elements
stooge(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (h - t) )
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
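# A compact sketch of the same recursion with explicit names; purely
# illustrative, since stooge sort (roughly O(n^2.71)) is a teaching example.
def stooge_sort(arr, lo=0, hi=None):
    if hi is None:
        hi = len(arr) - 1
    if lo >= hi:
        return arr
    if arr[lo] > arr[hi]:
        arr[lo], arr[hi] = arr[hi], arr[lo]  # put the endpoints in order
    if hi - lo + 1 > 2:
        third = (hi - lo + 1) // 3
        stooge_sort(arr, lo, hi - third)   # first two thirds
        stooge_sort(arr, lo + third, hi)   # last two thirds
        stooge_sort(arr, lo, hi - third)   # first two thirds again
    return arr

# stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]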
| 45
| 0
|
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
lowercase_ = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""
    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")
    # special case: check the interpreter itself against sys.version_info
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )
    # check that the right version is installed if a version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
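# A short usage sketch (ours; the package names are only examples):
# require_version raises ImportError when an installed version falls outside
# the requested range, and supports comma-separated multi-constraints.
def _demo_require_version() -> None:
    require_version("python>=3.7")            # special-cased against sys.version_info
    require_version("packaging>=20.0,<1000")  # multiple constraints on one package
    try:
        require_version("packaging<0.1", "(this one is expected to fail)")
    except ImportError as exc:
        print("caught:", exc)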
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: returned only if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'],
        )
    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 45
| 0
|
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    assert classes > dimensions
    # Check if features have already been loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
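# A small end-to-end sketch (ours, not part of the original module): features
# are laid out as (n_features, n_samples), matching the mean(1) /
# column_reshape convention used above.
def _demo_pca() -> None:
    features = np.array([[1.0, 2.0, 3.0, 4.0, 5.0], [2.0, 4.0, 6.0, 8.0, 10.0]])
    projected = principal_component_analysis(features, 1)
    print(projected.shape)  # (1, 5): one principal component per sample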
| 719
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
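    # Hedged sanity check (ours): a row of length 7 admits exactly 17
    # arrangements, the known small case from the problem statement.
    assert solution(7) == 17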
| 45
| 0
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Wraps a Clap feature extractor and a Roberta tokenizer into a single processor."""
    feature_extractor_class = 'ClapFeatureExtractor'
    tokenizer_class = ('RobertaTokenizer', 'RobertaTokenizerFast')
    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop('sampling_rate', None)
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios. Both cannot be none.')
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs)
        if text is not None and audios is not None:
            encoding['input_features'] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
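# Usage sketch for the processor above (the checkpoint id and the dummy audio
# are illustrative assumptions, not part of this module):
def _demo_clap_processor() -> None:
    import numpy as np
    processor = ClapProcessor.from_pretrained('laion/clap-htsat-unfused')
    batch = processor(
        text=['a dog barking'],
        audios=[np.zeros(48_000, dtype=np.float32)],
        sampling_rate=48_000,
        return_tensors='pt',
    )
    print(sorted(batch.keys()))  # attention_mask, input_features, input_ids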
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
model_classes = {
    """b0""": efficientnet.EfficientNetB0,
    """b1""": efficientnet.EfficientNetB1,
    """b2""": efficientnet.EfficientNetB2,
    """b3""": efficientnet.EfficientNetB3,
    """b4""": efficientnet.EfficientNetB4,
    """b5""": efficientnet.EfficientNetB5,
    """b6""": efficientnet.EfficientNetB6,
    """b7""": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
    lowercase__ = {int(k): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
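# Example invocation of the conversion script above (the file name and paths
# are assumptions for illustration):
#
#   python convert_efficientnet_to_pytorch.py --model_name b4 \
#       --pytorch_dump_folder_path hf_model --save_model
#
# Add --push_to_hub to also upload the converted weights and image processor.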
| 45
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a) -> None:
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is the difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Put the elements back into the array in sorted order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(n) for n in a))
if __name__ == "__main__":
main()
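    # Hedged check (ours): pigeonhole sort is O(n + range) time and O(range)
    # space, so it only pays off when max(a) - min(a) is comparable to len(a).
    nums = [5, -1, 3, 3, 0]
    pigeonhole_sort(nums)
    assert nums == [-1, 0, 3, 3, 5]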
| 721
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token) -> None:
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode('utf-8'))
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w') as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(',')
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
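# Example invocation (the script name and runner names are placeholders; the
# token needs actions:read permission):
#
#   python check_offline_runners.py \
#       --target_runners runner-1,runner-2 --token "$GITHUB_TOKEN"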
| 45
| 0
|
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase , UpperCAmelCase ):
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Any:
"""simple docstring"""
lowercase__ = load_tool('text-to-speech' )
self.tool.setup()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = self.tool('hey' )
lowercase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = self.tool('hey' )
lowercase__ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485] ) , ) )
| 700
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_speech_encoder_decoder"""] = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_speech_encoder_decoder"""] = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
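# With the _LazyModule registration above, importing the package stays cheap:
# the torch/flax modeling files are imported only on first attribute access,
# e.g. (sketch):
#
#   from transformers.models.speech_encoder_decoder import SpeechEncoderDecoderModel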
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
logger = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 45
| 0
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
lowercase_ = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : str = CamembertTokenizer
_UpperCamelCase : Any = CamembertTokenizerFast
_UpperCamelCase : List[str] = True
_UpperCamelCase : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = CamembertTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> str:
"""simple docstring"""
lowercase__ = '<pad>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>NOTUSED' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(a ) , 1_004 )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_005 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = CamembertTokenizer(a )
tokenizer.save_pretrained(self.tmpdirname )
lowercase__ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.encode(a )
lowercase__ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
lowercase__ = tokenizer.encode(a , add_special_tokens=a )
lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
lowercase__ = tokenizer.convert_ids_to_tokens(a )
lowercase__ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(a )
lowercase__ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ = tokenizer.encode(a , add_special_tokens=a )
lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(a )
lowercase__ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = {'input_ids': [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
lowercase__ = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a , )
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
lowercase_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_SCREAMING_SNAKE_CASE )
lowercase__ = ''.join(bin(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
lowercase__ = len(_SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = b'=' * ((6 - len(_SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_SCREAMING_SNAKE_CASE ) % 6)
else:
lowercase__ = b''
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = (
'argument should be a bytes-like object or ASCII string, '
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
try:
lowercase__ = encoded_data.decode('utf-8' )
except UnicodeDecodeError:
raise ValueError('base64 encoded data should only contain ASCII characters' )
lowercase__ = encoded_data.count('=' )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = ''.join(
bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = ''.join(
bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
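    # Hedged round-trip check (ours): agree with the stdlib on a sample payload.
    import base64 as stdlib_base64
    payload = b'Hello, World!'
    assert base64_encode(payload) == stdlib_base64.b64encode(payload)
    assert base64_decode(base64_encode(payload)) == payload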
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 45
| 0
|
ENERGY_CONVERSION = {
"""joule""": 1.0,
"""kilojoule""": 1_000,
"""megajoule""": 1_000_000,
"""gigajoule""": 1_000_000_000,
"""wattsecond""": 1.0,
"""watthour""": 3_600,
"""kilowatthour""": 3_600_000,
"""newtonmeter""": 1.0,
"""calorie_nutr""": 4_186.8,
"""kilocalorie_nutr""": 4_186_800.00,
"""electronvolt""": 1.6_0217_6634E-19,
"""britishthermalunit_it""": 1_055.05_585,
"""footpound""": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
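    # Hedged example (ours): one kilowatt-hour is 3.6 MJ.
    assert energy_conversion('kilowatthour', 'joule', 1) == 3_600_000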
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 ) -> List[Any]:
lowercase__ = None
if token is not None:
lowercase__ = {'Accept': 'application/vnd.github+json', 'Authorization': F"""Bearer {token}"""}
# The id of a workflow (not of a workflow run)
lowercase__ = '636036'
lowercase__ = F"""https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"""
# On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
url += F"""?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"""
lowercase__ = requests.get(_SCREAMING_SNAKE_CASE , headers=_SCREAMING_SNAKE_CASE ).json()
return result["workflow_runs"]
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowercase__ = get_daily_ci_runs(_SCREAMING_SNAKE_CASE )
lowercase__ = None
for workflow_run in workflow_runs:
if workflow_run["status"] == "completed":
lowercase__ = workflow_run['id']
break
return workflow_run_id
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = get_last_daily_ci_runs(_SCREAMING_SNAKE_CASE )
if workflow_run_id is not None:
lowercase__ = get_artifacts_links(worflow_run_id=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
for artifact_name in artifact_names:
if artifact_name in artifacts_links:
lowercase__ = artifacts_links[artifact_name]
download_artifact(
artifact_name=_SCREAMING_SNAKE_CASE , artifact_url=_SCREAMING_SNAKE_CASE , output_dir=_SCREAMING_SNAKE_CASE , token=_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
get_last_daily_ci_artifacts(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = {}
for artifact_name in artifact_names:
lowercase__ = os.path.join(_SCREAMING_SNAKE_CASE , F"""{artifact_name}.zip""" )
if os.path.isfile(_SCREAMING_SNAKE_CASE ):
lowercase__ = {}
with zipfile.ZipFile(_SCREAMING_SNAKE_CASE ) as z:
for filename in z.namelist():
                if not os.path.isdir(filename):
# read the file
with z.open(_SCREAMING_SNAKE_CASE ) as f:
lowercase__ = f.read().decode('UTF-8' )
return results
| 45
| 0
|
'''simple docstring'''
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
logger = logging.getLogger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
# save results
if os.path.exists(_SCREAMING_SNAKE_CASE ):
if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ) and os.path.isfile(
os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) ):
os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'config.json' ) )
if os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ):
os.remove(os.path.join(_SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) )
else:
os.makedirs(_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ) -> Union[str, Any]:
lowercase__ = 2
if unlogit:
lowercase__ = torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = p * torch.log(_SCREAMING_SNAKE_CASE )
lowercase__ = 0
return -plogp.sum(dim=-1 )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
logger.info('lv, h >\t' + '\t'.join(F"""{x + 1}""" for x in range(len(_SCREAMING_SNAKE_CASE ) ) ) )
for row in range(len(_SCREAMING_SNAKE_CASE ) ):
if tensor.dtype != torch.long:
logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:.5f}""" for x in tensor[row].cpu().data ) )
else:
logger.info(F"""layer {row + 1}:\t""" + '\t'.join(F"""{x:d}""" for x in tensor[row].cpu().data ) )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ) -> str:
lowercase__ , lowercase__ = model.config.num_hidden_layers, model.config.num_attention_heads
lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
lowercase__ = torch.zeros(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
if head_mask is None:
lowercase__ = torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).to(args.device )
head_mask.requires_grad_(requires_grad=_SCREAMING_SNAKE_CASE )
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
lowercase__ = None
lowercase__ = 0.0
lowercase__ = 0.0
for step, inputs in enumerate(tqdm(_SCREAMING_SNAKE_CASE , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
lowercase__ = tuple(t.to(args.device ) for t in inputs )
((lowercase__ ) , ) = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
lowercase__ = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
lowercase__ , lowercase__ , lowercase__ = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(_SCREAMING_SNAKE_CASE ):
lowercase__ = entropy(attn.detach() , _SCREAMING_SNAKE_CASE )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(_SCREAMING_SNAKE_CASE ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
lowercase__ = 2
lowercase__ = torch.pow(torch.pow(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
if not args.dont_normalize_global_importance:
lowercase__ = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
logger.info('Head ranked by importance scores' )
lowercase__ = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
lowercase__ = torch.arange(
head_importance.numel() , device=args.device )
lowercase__ = head_ranks.view_as(_SCREAMING_SNAKE_CASE )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
return attn_entropy, head_importance, total_loss
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> str:
lowercase__ , lowercase__ , lowercase__ = compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE )
    lowercase__ = 1 / loss  # instead of a downstream score, use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , _SCREAMING_SNAKE_CASE , original_score * args.masking_threshold )
lowercase__ = torch.ones_like(_SCREAMING_SNAKE_CASE )
lowercase__ = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
lowercase__ = original_score
while current_score >= original_score * args.masking_threshold:
lowercase__ = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float('Inf' )  # never re-select heads that are already masked
lowercase__ = head_importance.view(-1 ).sort()[1]
if len(_SCREAMING_SNAKE_CASE ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
lowercase__ = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
lowercase__ = new_head_mask.view(-1 )
        new_head_mask[current_heads_to_mask] = 0.0
lowercase__ = new_head_mask.view_as(_SCREAMING_SNAKE_CASE )
lowercase__ = new_head_mask.clone().detach()
print_ad_tensor(_SCREAMING_SNAKE_CASE )
# Compute metric and head importance again
lowercase__ , lowercase__ , lowercase__ = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
lowercase__ = 1 / loss
        logger.info(
            'Masking: current score: %f, remaining heads %d (%.1f percent)' , _SCREAMING_SNAKE_CASE , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(_SCREAMING_SNAKE_CASE )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
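# Physically prune the previously masked heads, then re-run the evaluation to compare
# parameter count, score and timing before and after pruning.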
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
lowercase__ = datetime.now()
lowercase__ , lowercase__ , lowercase__ = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE )
lowercase__ = 1 / loss
lowercase__ = datetime.now() - before_time
lowercase__ = sum(p.numel() for p in model.parameters() )
lowercase__ = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_SCREAMING_SNAKE_CASE ) )
}
for k, v in heads_to_prune.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
lowercase__ = [
v,
]
assert sum(len(_SCREAMING_SNAKE_CASE ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(_SCREAMING_SNAKE_CASE )
lowercase__ = sum(p.numel() for p in model.parameters() )
lowercase__ = datetime.now()
lowercase__ , lowercase__ , lowercase__ = compute_heads_importance(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , compute_entropy=_SCREAMING_SNAKE_CASE , compute_importance=_SCREAMING_SNAKE_CASE , head_mask=_SCREAMING_SNAKE_CASE , actually_pruned=_SCREAMING_SNAKE_CASE , )
lowercase__ = 1 / loss
lowercase__ = datetime.now() - before_time
    logger.info(
        'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percent)' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , pruned_num_params / original_num_params * 100 , )
    logger.info('Pruning: score with masking: %f score with pruning: %f' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    logger.info('Pruning: speed ratio (original timing / new timing): %f percent' , original_time / new_time * 100 )
save_model(_SCREAMING_SNAKE_CASE , args.output_dir )
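# Entry point: parse the CLI arguments, set up (possibly distributed) devices, load a
# GPT-2 LM head model and the token dataset, then compute head importance and
# optionally run the masking/pruning experiments above.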
def __UpperCamelCase () -> Union[str, Any]:
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , required=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=_SCREAMING_SNAKE_CASE , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=_SCREAMING_SNAKE_CASE , type=_SCREAMING_SNAKE_CASE , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
    parser.add_argument(
        '--try_masking' , action='store_true' , help='Whether to try to mask heads until a threshold of accuracy.' )
    parser.add_argument(
        '--masking_threshold' , default=0.9 , type=_SCREAMING_SNAKE_CASE , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=_SCREAMING_SNAKE_CASE , help='Amount of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=_SCREAMING_SNAKE_CASE , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=_SCREAMING_SNAKE_CASE , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=_SCREAMING_SNAKE_CASE , help='Batch size.' )
parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 )
parser.add_argument('--local_rank' , type=_SCREAMING_SNAKE_CASE , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=_SCREAMING_SNAKE_CASE , default='' , help='Can be used for distant debugging.' )
lowercase__ = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_SCREAMING_SNAKE_CASE )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
lowercase__ = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
lowercase__ = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
lowercase__ = torch.device('cuda' , args.local_rank )
lowercase__ = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
lowercase__ = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
lowercase__ = nn.parallel.DistributedDataParallel(
_SCREAMING_SNAKE_CASE , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_SCREAMING_SNAKE_CASE )
elif args.n_gpu > 1:
lowercase__ = nn.DataParallel(_SCREAMING_SNAKE_CASE )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=_SCREAMING_SNAKE_CASE )
torch.save(_SCREAMING_SNAKE_CASE , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , _SCREAMING_SNAKE_CASE )
# Prepare dataset
lowercase__ = np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
lowercase__ = (torch.from_numpy(_SCREAMING_SNAKE_CASE ),)
lowercase__ = TensorDataset(*_SCREAMING_SNAKE_CASE )
lowercase__ = RandomSampler(_SCREAMING_SNAKE_CASE )
lowercase__ = DataLoader(_SCREAMING_SNAKE_CASE , sampler=_SCREAMING_SNAKE_CASE , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
lowercase__ = mask_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
prune_heads(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
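# Nightly GPU integration tests covering the dual-guided, text-to-image and
# image-variation modes of the VersatileDiffusion pipeline.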
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
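# Helper that holds the hyper-parameters and builds the config dict consumed by the
# GLPN image-processing tests below.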
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : List[Any] , a : Tuple , a : Tuple=7 , a : List[str]=3 , a : Dict=18 , a : int=30 , a : List[Any]=400 , a : Any=True , a : Any=32 , a : str=True , )-> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size_divisor
lowercase__ = do_rescale
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[int] = GLPNImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
lowercase__ = GLPNImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size_divisor' ) )
self.assertTrue(hasattr(a , 'resample' ) )
self.assertTrue(hasattr(a , 'do_rescale' ) )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> str:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> bool:
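    # An arithmetic series has a constant difference between consecutive terms,
    # e.g. [2, 4, 6] (difference 2); a single-element series is trivially arithmetic.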
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non-empty list' )
if len(_SCREAMING_SNAKE_CASE ) == 1:
return True
lowercase__ = series[1] - series[0]
for index in range(len(_SCREAMING_SNAKE_CASE ) - 1 ):
if series[index + 1] - series[index] != common_diff:
return False
return True
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> float:
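    # The arithmetic mean is the sum of the values divided by how many there are.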
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
raise ValueError('Input series is not valid, valid series - [2, 4, 6]' )
if len(_SCREAMING_SNAKE_CASE ) == 0:
        raise ValueError('Input list must be a non-empty list' )
lowercase__ = 0
for val in series:
answer += val
return answer / len(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase_ = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : Optional[str] = field(
default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'The column name of the images in the files.'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the training data.'} )
_UpperCamelCase : Optional[str] = field(default=UpperCAmelCase , metadata={'help': 'A folder containing the validation data.'} )
_UpperCamelCase : Optional[float] = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
_UpperCamelCase : Optional[int] = field(
default=UpperCAmelCase , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = {}
if self.train_dir is not None:
lowercase__ = self.train_dir
if self.validation_dir is not None:
lowercase__ = self.validation_dir
lowercase__ = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE :
_UpperCamelCase : str = field(
default=UpperCAmelCase , metadata={
'help': (
'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'
)
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
_UpperCamelCase : Optional[str] = field(
default=UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
_UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
_UpperCamelCase : str = field(default=UpperCAmelCase , metadata={'help': 'Name or path of preprocessor config.'} )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
_UpperCamelCase : float = field(
default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} )
_UpperCamelCase : bool = field(
default=UpperCAmelCase , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} )
@dataclass
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : float = field(
default=1E-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> int:
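    # Data collator: stack the per-example pixel_values into one batched tensor.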
lowercase__ = torch.stack([example['pixel_values'] for example in examples] )
return {"pixel_values": pixel_values}
def __UpperCamelCase () -> List[str]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowercase__ , lowercase__ , lowercase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowercase__ , lowercase__ , lowercase__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_mae' , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowercase__ = training_args.get_process_log_level()
logger.setLevel(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(_SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowercase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowercase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset.
lowercase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
lowercase__ = None if 'validation' in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , _SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0:
lowercase__ = ds['train'].train_test_split(data_args.train_val_split )
lowercase__ = split['train']
lowercase__ = split['test']
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowercase__ = {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
lowercase__ = ViTMAEConfig.from_pretrained(model_args.config_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
lowercase__ = ViTMAEConfig()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
# adapt config
config.update(
{
'mask_ratio': model_args.mask_ratio,
'norm_pix_loss': model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
lowercase__ = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **_SCREAMING_SNAKE_CASE )
elif model_args.model_name_or_path:
lowercase__ = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **_SCREAMING_SNAKE_CASE )
else:
lowercase__ = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
lowercase__ = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info('Training new model from scratch' )
lowercase__ = ViTMAEForPreTraining(_SCREAMING_SNAKE_CASE )
if training_args.do_train:
lowercase__ = ds['train'].column_names
else:
lowercase__ = ds['validation'].column_names
if data_args.image_column_name is not None:
lowercase__ = data_args.image_column_name
elif "image" in column_names:
lowercase__ = 'image'
elif "img" in column_names:
lowercase__ = 'img'
else:
lowercase__ = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
lowercase__ = image_processor.size['shortest_edge']
else:
lowercase__ = (image_processor.size['height'], image_processor.size['width'])
lowercase__ = Compose(
[
Lambda(lambda _SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ),
RandomResizedCrop(_SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
def preprocess_images(_SCREAMING_SNAKE_CASE ):
lowercase__ = [transforms(_SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError('--do_train requires a train dataset' )
if data_args.max_train_samples is not None:
lowercase__ = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(_SCREAMING_SNAKE_CASE )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError('--do_eval requires a validation dataset' )
if data_args.max_eval_samples is not None:
lowercase__ = (
ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(_SCREAMING_SNAKE_CASE )
# Compute absolute learning rate
lowercase__ = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
lowercase__ = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
lowercase__ = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
lowercase__ = None
if training_args.resume_from_checkpoint is not None:
lowercase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowercase__ = last_checkpoint
lowercase__ = trainer.train(resume_from_checkpoint=_SCREAMING_SNAKE_CASE )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowercase__ = trainer.evaluate()
trainer.log_metrics('eval' , _SCREAMING_SNAKE_CASE )
trainer.save_metrics('eval' , _SCREAMING_SNAKE_CASE )
# Write model card and (optionally) push to hub
lowercase__ = {
'tasks': 'masked-auto-encoding',
'dataset': data_args.dataset_name,
'tags': ['masked-auto-encoding'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**_SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Dict:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
from __future__ import annotations
import math
from collections.abc import Callable
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 100 , ) -> float:
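    # Approximates the arc length of fnc on [x_start, x_end] by summing the lengths of
    # `steps` straight chord segments; more steps give a better approximation.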
lowercase__ = x_start
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
lowercase__ = 0.0
for _ in range(_SCREAMING_SNAKE_CASE ):
# Approximates curve as a sequence of linear lines and sums their length
lowercase__ = (x_end - x_start) / steps + xa
lowercase__ = fnc(_SCREAMING_SNAKE_CASE )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowercase__ = xa
lowercase__ = fxa
return length
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return math.sin(10 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
lowercase_ = 10
while i <= 100_000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
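# Helper that builds tiny BertGeneration configs/inputs and runs the output-shape and
# past-key-values consistency checks used by the test cases below.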
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , a : List[str] , a : int=13 , a : List[Any]=7 , a : Optional[Any]=True , a : Dict=True , a : str=99 , a : str=32 , a : int=5 , a : int=4 , a : Dict=37 , a : Optional[int]="gelu" , a : Any=0.1 , a : Any=0.1 , a : int=50 , a : Optional[Any]=0.02 , a : Any=True , a : Optional[int]=None , )-> Any:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_input_mask
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = initializer_range
lowercase__ = use_labels
lowercase__ = scope
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_input_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = self.get_config()
return config, input_ids, input_mask, token_labels
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=a , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
        lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = True
lowercase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int , a : List[Any] , a : List[Any] , a : Dict , **a : List[Any] , )-> Dict:
"""simple docstring"""
lowercase__ = BertGenerationEncoder(config=a )
model.to(a )
model.eval()
lowercase__ = model(a , attention_mask=a )
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : List[str] , a : Any , a : Dict , a : int , a : Any , a : int , **a : Optional[int] , )-> Dict:
"""simple docstring"""
lowercase__ = True
lowercase__ = BertGenerationEncoder(config=a )
model.to(a )
model.eval()
lowercase__ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , )
lowercase__ = model(
a , attention_mask=a , encoder_hidden_states=a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : Any , a : List[str] , a : Any , **a : List[Any] , )-> Optional[Any]:
"""simple docstring"""
lowercase__ = True
lowercase__ = True
lowercase__ = BertGenerationDecoder(config=a ).to(a ).eval()
# first forward pass
lowercase__ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , use_cache=a , )
lowercase__ = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
lowercase__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase__ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
lowercase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowercase__ = torch.cat([input_mask, next_mask] , dim=-1 )
lowercase__ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , output_hidden_states=a , )['hidden_states'][0]
lowercase__ = model(
a , attention_mask=a , encoder_hidden_states=a , encoder_attention_mask=a , past_key_values=a , output_hidden_states=a , )['hidden_states'][0]
# select random slice
lowercase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowercase__ = output_from_no_past[:, -3:, random_slice_idx].detach()
lowercase__ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a , a , atol=1E-3 ) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : List[str] , a : Optional[int] , a : int , a : Any , *a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = BertGenerationDecoder(a )
model.to(a )
model.eval()
lowercase__ = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.prepare_config_and_inputs()
lowercase__ = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : int = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_UpperCamelCase : Union[str, Any] = (BertGenerationDecoder,) if is_torch_available() else ()
_UpperCamelCase : str = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = BertGenerationEncoderTester(self )
lowercase__ = ConfigTester(self , config_class=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs()
lowercase__ = 'bert'
self.model_tester.create_and_check_model(a , a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
        lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase__ = None
self.model_tester.create_and_check_model_as_decoder(
a , a , a , a , a , a , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(a )
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
lowercase__ = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
lowercase__ = model(a )[0]
lowercase__ = torch.Size([1, 8, 1_024] )
self.assertEqual(output.shape , a )
lowercase__ = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1E-4 ) )
@require_torch
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any:
"""simple docstring"""
lowercase__ = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
lowercase__ = torch.tensor([[101, 7_592, 1_010, 2_026, 3_899, 2_003, 10_140, 102]] )
with torch.no_grad():
lowercase__ = model(a )[0]
lowercase__ = torch.Size([1, 8, 50_358] )
self.assertEqual(output.shape , a )
lowercase__ = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , a , atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
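# Fast unit tests for KandinskyInpaintPipeline built from tiny randomly initialized
# components, plus a slow GPU integration test against a reference output.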
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[str] = KandinskyInpaintPipeline
_UpperCamelCase : Optional[Any] = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_UpperCamelCase : List[Any] = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_UpperCamelCase : int = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_UpperCamelCase : str = False
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[Any]:
"""simple docstring"""
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Any:
"""simple docstring"""
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
return 100
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
lowercase__ = MultilingualCLIP(a )
lowercase__ = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[Any]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowercase__ = UNetaDConditionModel(**a )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[int]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
lowercase__ = self.dummy_text_encoder
lowercase__ = self.dummy_tokenizer
lowercase__ = self.dummy_unet
lowercase__ = self.dummy_movq
lowercase__ = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='linear' , beta_start=0.00085 , beta_end=0.012 , clip_sample=a , set_alpha_to_one=a , steps_offset=1 , prediction_type='epsilon' , thresholding=a , )
lowercase__ = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : Tuple , a : Any=0 )-> Optional[Any]:
"""simple docstring"""
lowercase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a ) ).to(a )
lowercase__ = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a )
# create init_image
lowercase__ = floats_tensor((1, 3, 64, 64) , rng=random.Random(a ) ).to(a )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowercase__ = Image.fromarray(np.uinta(a ) ).convert('RGB' ).resize((256, 256) )
# create mask
lowercase__ = np.ones((64, 64) , dtype=np.floataa )
lowercase__ = 0
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : int )-> str:
"""simple docstring"""
lowercase__ = 'cpu'
lowercase__ = self.get_dummy_components()
lowercase__ = self.pipeline_class(**a )
lowercase__ = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = pipe(**self.get_dummy_inputs(a ) )
lowercase__ = output.images
lowercase__ = pipe(
**self.get_dummy_inputs(a ) , return_dict=a , )[0]
lowercase__ = image[0, -3:, -3:, -1]
lowercase__ = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
lowercase__ = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Dict:
"""simple docstring"""
lowercase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowercase__ = np.ones((768, 768) , dtype=np.floataa )
lowercase__ = 0
lowercase__ = 'a hat'
lowercase__ = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(a )
lowercase__ = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint' , torch_dtype=torch.floataa )
lowercase__ = pipeline.to(a )
pipeline.set_progress_bar_config(disable=a )
lowercase__ = torch.Generator(device='cpu' ).manual_seed(0 )
lowercase__ , lowercase__ = pipe_prior(
a , generator=a , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
lowercase__ = pipeline(
a , image=a , mask_image=a , image_embeds=a , negative_image_embeds=a , generator=a , num_inference_steps=100 , height=768 , width=768 , output_type='np' , )
lowercase__ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a , a )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
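    # Dynamic programming over row lengths: different_colour_ways_number[n][t - 2]
    # counts the ways to fill a length-n row using at least one coloured tile of
    # length t (t in {2, 3, 4}); the answer sums the three tile lengths for the full row.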
lowercase__ = [[0] * 3 for _ in range(length + 1 )]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Optional[Any] , *a : Optional[int] , **a : List[Any] )-> None:
"""simple docstring"""
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , a , )
super().__init__(*a , **a )
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()  # pin the conditioned dimensions of the trajectory
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
                grad[timesteps < 2] = 0  # stop guiding once the trajectory is almost fully denoised
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
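    # A minimal usage sketch for this pipeline (upstream it is diffusers'
    # ValueGuidedRLPipeline). The env and checkpoint names below are
    # illustrative assumptions, not guaranteed to exist:
    #
    #   env = gym.make("hopper-medium-v2")  # a D4RL-style env exposing get_dataset()
    #   pipeline = ValueGuidedRLPipeline.from_pretrained(
    #       "bglick13/hopper-medium-v2-value-function-hor32", env=env
    #   )
    #   obs = env.reset()
    #   action = pipeline(obs, planning_horizon=32)
    #   obs, reward, done, info = env.step(action)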
| 45
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""allenai/longformer-base-4096""": """https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096""": """https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json""",
"""allenai/longformer-large-4096-finetuned-triviaqa""": (
"""https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json"""
),
"""allenai/longformer-base-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json"""
),
"""allenai/longformer-large-4096-extra.pos.embd.only""": (
"""https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json"""
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        # needs to be >= 14 to support tril operator
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1
        return inputs
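# A short usage sketch for the classes above (a hedged example mirroring the
# standard transformers config workflow; nothing below is required by this file):
#
#   from transformers import LongformerConfig, LongformerModel
#   config = LongformerConfig(attention_window=256)  # one window size for all layers
#   model = LongformerModel(config)                  # randomly initialised
#   onnx_config = LongformerOnnxConfig(config)
#   list(onnx_config.inputs)  # -> ["input_ids", "attention_mask", "global_attention_mask"]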
| 711
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    def brightness(c: int) -> float:
        # shift every channel value by `level` (the 128s cancel; kept for clarity)
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError('level must be between -255.0 (black) and 255.0 (white)')
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open("""image_data/lena.jpg""") as img:
# Change brightness to 100
brigt_img = change_brightness(img, 100)
brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
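    # Sanity check of the point transform: with level=100 every channel value
    # is lifted by 100, so a mid-grey pixel 128 maps to 128 + 100 + 0 = 228.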
| 45
| 0
|
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)
    # Find out the dimensionality
    dim = len(vectors[0])
    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)
    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()
    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()
        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate values
        centroid_value = tf.placeholder('float64', [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))
        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate value
        assignment_value = tf.placeholder('int32')
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))
        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder('float', [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)
        ##Node for computing Euclidean distances
        # Placeholders for input
        va = tf.placeholder('float', [dim])
        vb = tf.placeholder('float', [dim])
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(va, vb), 2)))
        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder('float', [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)
        ##INITIALIZING STATE VARIABLES
        ##The Variable-initializer should be defined after all the Variables
        ##have been constructed, so that each of them is included.
        init_op = tf.global_variables_initializer()
        # Initialize all variables
        sess.run(init_op)
        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={va: vect, vb: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})
            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})
        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
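# Minimal usage sketch. The graph-mode API above needs TF 1.x (or
# `import tensorflow.compat.v1 as tf; tf.disable_v2_behavior()` under TF 2);
# the sample data below is illustrative.
if __name__ == "__main__":
    from numpy.random import rand

    sample_vectors = rand(20, 2)  # twenty random 2-D points
    final_centroids, final_assignments = tf_k_means_cluster(sample_vectors, 3)
    print(final_centroids)
    print(final_assignments)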
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
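# For reference, a stand-alone sketch of the processor under test (standard
# DonutImageProcessor API; the expected shape assumes the resize/pad settings
# used by the tester above):
#
#   processor = DonutImageProcessor(size={"height": 18, "width": 20}, do_align_long_axis=False)
#   pixel_values = processor(images=pil_image, return_tensors="pt").pixel_values
#   pixel_values.shape  # expected: (1, 3, 18, 20)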
| 45
| 0
|
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]
            ways_number[row_length] += 1
    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
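    # Quick check against the example in Project Euler problem 114:
    # a row measuring seven units long can be filled in exactly seventeen ways.
    assert solution(7) == 17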
| 713
|
import math
def res(x: int, y: int) -> float:
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError('This should never happen')


if __name__ == "__main__":  # Main function
    # Read two numbers from input and typecast them to int using map function.
    # Here x is the base and y is the power.
    prompt = "Enter the base and the power separated by a comma: "
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("Largest number is", xa, "^", ya)
    elif resb > resa:
        print("Largest number is", xb, "^", yb)
    else:
        print("Both are equal")
| 45
| 0
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : List[str]=32 , a : int=2 , a : Optional[Any]=3 , a : List[str]=16 , a : Union[str, Any]=[32, 64, 128] , a : Tuple=[1, 2, 1] , a : Optional[int]=[2, 2, 4] , a : Optional[Any]=2 , a : str=2.0 , a : Any=True , a : List[Any]=0.0 , a : Tuple=0.0 , a : str=0.1 , a : Dict="gelu" , a : Tuple=False , a : Optional[int]=True , a : str=0.02 , a : int=1E-5 , a : int=True , a : Union[str, Any]=None , a : List[Any]=True , a : Union[str, Any]=10 , a : Any=8 , a : str=["stage1", "stage2"] , a : Dict=[1, 2] , )-> str:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = embed_dim
lowercase__ = hidden_sizes
lowercase__ = depths
lowercase__ = num_heads
lowercase__ = window_size
lowercase__ = mlp_ratio
lowercase__ = qkv_bias
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = drop_path_rate
lowercase__ = hidden_act
lowercase__ = use_absolute_embeddings
lowercase__ = patch_norm
lowercase__ = layer_norm_eps
lowercase__ = initializer_range
lowercase__ = is_training
lowercase__ = scope
lowercase__ = use_labels
lowercase__ = type_sequence_label_size
lowercase__ = encoder_stride
lowercase__ = out_features
lowercase__ = out_indices
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> str:
"""simple docstring"""
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : Optional[int] , a : int , a : int )-> Optional[int]:
"""simple docstring"""
lowercase__ = FocalNetModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
lowercase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : int , a : List[Any] , a : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = FocalNetBackbone(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase__ = None
lowercase__ = FocalNetBackbone(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[Any] , a : Optional[Any] , a : int )-> Optional[Any]:
"""simple docstring"""
lowercase__ = FocalNetForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = FocalNetForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Union[str, Any] , a : Union[str, Any] , a : Dict )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = FocalNetForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = FocalNetForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Any = False
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : Optional[int] = False
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = FocalNetModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , embed_dim=37 , has_text_modality=a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
"""simple docstring"""
return
def SCREAMING_SNAKE_CASE_ ( self : Any )-> List[str]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Optional[Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Any:
"""simple docstring"""
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Tuple:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Tuple:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : Optional[Any] , a : Any , a : int , a : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(a , a ) )
lowercase__ = outputs.hidden_states
lowercase__ = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# FocalNet has a different seq_length
lowercase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase__ = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowercase__ , lowercase__ , lowercase__ , lowercase__ = reshaped_hidden_states[0].shape
lowercase__ = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase__ = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(a , a , a , a )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = 3
lowercase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase__ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase__ = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[str]:
"""simple docstring"""
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = FocalNetModel.from_pretrained(a )
self.assertIsNotNone(a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> str:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase__ = _config_zero_init(a )
for model_class in self.all_model_classes:
lowercase__ = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> str:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([0.2166, -0.4368, 0.2191] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> int:
"""simple docstring"""
lowercase__ = FocalNetModelTester(self )
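# A stand-alone sketch of the backbone usage exercised by the tests above
# (standard transformers backbone API; the config values are illustrative):
#
#   config = FocalNetConfig(out_features=["stage1", "stage2"])
#   backbone = FocalNetBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one map per requested stage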
| 714
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        # A cell can be visited if it lies inside the grid, is land, and is unvisited.
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Depth-first search over the 8 neighbours of cell (i, j).
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
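if __name__ == "__main__":
    # Example grid: 1s are land, 0s are water; with 8-way connectivity
    # this grid contains 5 islands.
    matrix = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    graph = Graph(len(matrix), len(matrix[0]), matrix)
    print(graph.count_islands())  # -> 5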
| 45
| 0
|
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
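# Quick self-checks for the functions above:
#   binary_search([0, 5, 7, 10, 15], 6)  -> None (6 is absent)
#   bisect_left([0, 5, 7, 10, 15], 6)    -> 2   (insertion point keeping order)
#   bisect_right([0, 5, 7, 10, 15], 5)   -> 2   (insertion point after equal items)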
| 715
|
from string import ascii_uppercase
lowercase_ = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')
    new_value = ''
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
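    # Worked example: decimal_to_any(255, 16) walks divmod(255, 16) -> (15, 15)
    # then divmod(15, 16) -> (0, 15), collecting the digits "FF";
    # int("FF", 16) == 255 round-trips, as the loop above asserts.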
| 45
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
def __init__( self : Any , a : int=None , a : Union[str, Any]=None , a : List[Any]=True , a : Optional[Any]="[UNK]" , a : Optional[Any]="[SEP]" , a : int="[PAD]" , a : Any="[CLS]" , a : List[str]="[MASK]" , a : Optional[Any]=True , a : Optional[int]=None , **a : Optional[Any] , )-> Dict:
"""simple docstring"""
super().__init__(
a , tokenizer_file=a , do_lower_case=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , tokenize_chinese_chars=a , strip_accents=a , **a , )
lowercase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , a ) != do_lower_case
or normalizer_state.get('strip_accents' , a ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , a ) != tokenize_chinese_chars
):
lowercase__ = getattr(a , normalizer_state.pop('type' ) )
lowercase__ = do_lower_case
lowercase__ = strip_accents
lowercase__ = tokenize_chinese_chars
lowercase__ = normalizer_class(**a )
lowercase__ = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_b=None):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_b + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
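# Usage sketch for the class above (assumes hub access; the checkpoint id is
# one of the entries listed in PRETRAINED_VOCAB_FILES_MAP):
#
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   enc = tok("convolutions meet attention", return_tensors="pt")
#   enc["input_ids"]  # [CLS] ... [SEP] ids, token_type_ids all zero for one segment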
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
(
(
lowercase__
) , (
lowercase__
) , (
lowercase__
) ,
) = config_and_inputs
lowercase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
def __UpperCamelCase () -> str:
lowercase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> List[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224' ).to(a )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(**a )
# verify the logits
lowercase__ = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowercase__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' ).to(a )
lowercase__ = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=480 )
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass
with torch.no_grad():
lowercase__ = model(a , interpolate_pos_encoding=a )
# verify the logits
lowercase__ = torch.Size((1, 3_601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , a )
lowercase__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , a , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto' )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=a , return_tensors='pt' )
lowercase__ = inputs.pixel_values.to(a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowercase__ = model(a )
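# A stand-alone classification sketch mirroring the integration tests above
# (assumes hub access; the label lookup uses the standard config mapping):
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])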
| 45
| 0
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
def __init__( self : Any , a : str=32 , a : Optional[int]=768 , a : List[str]=12 , a : Optional[int]=12 , a : Tuple=3_072 , a : Optional[Any]=2 , a : Any=512 , a : List[str]=256 , a : int=True , a : int=True , a : List[Any]=("p2c", "c2p") , a : Any="layer_norm" , a : str="gelu_python" , a : Optional[Any]=0.1 , a : str=0.1 , a : Any=0.1 , a : Tuple=0.0 , a : List[Any]=0.1 , a : List[str]=0.02 , a : Optional[Any]=1E-7 , a : List[Any]=1E-5 , a : Optional[int]="group" , a : Tuple="gelu" , a : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , a : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a : str=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a : List[Any]=False , a : Optional[int]=128 , a : Tuple=16 , a : Tuple=True , a : Optional[Any]=0.05 , a : List[Any]=10 , a : Dict=2 , a : Optional[Any]=0.0 , a : Optional[Any]=10 , a : Any=0 , a : Union[str, Any]="mean" , a : List[str]=False , a : str=False , a : Union[str, Any]=256 , a : int=0 , a : Optional[Any]=1 , a : Tuple=2 , **a : List[Any] , )-> Optional[Any]:
"""simple docstring"""
super().__init__(**a , pad_token_id=a , bos_token_id=a , eos_token_id=a )
lowercase__ = hidden_size
lowercase__ = feat_extract_norm
lowercase__ = feat_extract_activation
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = list(a )
lowercase__ = conv_bias
lowercase__ = num_conv_pos_embeddings
lowercase__ = num_conv_pos_embedding_groups
lowercase__ = len(self.conv_dim )
lowercase__ = num_hidden_layers
lowercase__ = intermediate_size
lowercase__ = squeeze_factor
lowercase__ = max_position_embeddings
lowercase__ = position_buckets
lowercase__ = share_att_key
lowercase__ = relative_attention
lowercase__ = norm_rel_ebd
lowercase__ = list(a )
lowercase__ = hidden_act
lowercase__ = num_attention_heads
lowercase__ = hidden_dropout
lowercase__ = attention_dropout
lowercase__ = activation_dropout
lowercase__ = feat_proj_dropout
lowercase__ = final_dropout
lowercase__ = layer_norm_eps
lowercase__ = feature_layer_norm_eps
lowercase__ = initializer_range
lowercase__ = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect.'
'It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
lowercase__ = apply_spec_augment
lowercase__ = mask_time_prob
lowercase__ = mask_time_length
lowercase__ = mask_time_min_masks
lowercase__ = mask_feature_prob
lowercase__ = mask_feature_length
lowercase__ = mask_feature_min_masks
# ctc loss
lowercase__ = ctc_loss_reduction
lowercase__ = ctc_zero_infinity
# sequence classification
lowercase__ = use_weighted_layer_sum
lowercase__ = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
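    # Worked example for the property above: with the default conv_stride
    # (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the product is 5 * 2**6 = 320,
    # i.e. one encoder frame per 320 raw audio samples (20 ms at 16 kHz).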
| 717
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If first element is smaller than the last then swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort first 2/3 elements
        stooge(arr, i, (h - t))
        # Recursively sort last 2/3 elements
        stooge(arr, i + t, h)
        # Recursively sort first 2/3 elements again
        stooge(arr, i, (h - t))
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
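    # Example: the input "2,4,5,3,1" prints [1, 2, 3, 4, 5]. Stooge sort runs
    # in O(n^(log 3 / log 1.5)) ≈ O(n^2.71) time, so it is strictly a teaching
    # algorithm, not a practical one.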
| 45
| 0
|
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
if __name__ == "__main__":
import doctest
doctest.testmod()
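    # Worked example: for [1, 2, 3] the (include, exclude) pair evolves
    # (1, 0) -> (2, 1) -> (4, 2), so the answer is max(4, 2) = 4, obtained by
    # picking the non-adjacent elements 1 and 3.
    assert maximum_non_adjacent_sum([1, 2, 3]) == 4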
| 718
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
lowercase__ = spearmanr(a , a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
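# The metric above is a thin wrapper around scipy.stats.spearmanr (imported at
# the top of this file); calling scipy directly reproduces the docstring
# example (sketch, not from the source):
_rho, _pval = spearmanr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
assert round(_rho, 2) == -0.7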
| 45
| 0
|
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowercase_ = 6_378_137.0
lowercase_ = 6_356_752.314_245
lowercase_ = 6_378_137
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> float:
lowercase__ = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
lowercase__ = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
lowercase__ = atan((1 - flattening) * tan(radians(_SCREAMING_SNAKE_CASE ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
lowercase__ = haversine_distance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
    lowercase__ = (b_lata + b_latb) / 2
    lowercase__ = (b_lata - b_latb) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
lowercase__ = (sin(_SCREAMING_SNAKE_CASE ) ** 2) * (cos(_SCREAMING_SNAKE_CASE ) ** 2)
lowercase__ = cos(sigma / 2 ) ** 2
    lowercase__ = (sigma - sin(_SCREAMING_SNAKE_CASE )) * (x_numerator / x_denominator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
lowercase__ = (cos(_SCREAMING_SNAKE_CASE ) ** 2) * (sin(_SCREAMING_SNAKE_CASE ) ** 2)
lowercase__ = sin(sigma / 2 ) ** 2
lowercase__ = (sigma + sin(_SCREAMING_SNAKE_CASE )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
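# Sketch (assumed values, not from the source): the parametric (reduced)
# latitude used above shrinks the geodetic latitude slightly because of the
# Earth's flattening (~1/298.257):
_flattening = (6_378_137.0 - 6_356_752.314_245) / 6_378_137.0
_beta = atan((1 - _flattening) * tan(radians(45.0)))  # ~0.7838 rad, i.e. ~44.904 degrees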
| 719
|
def __UpperCamelCase (_SCREAMING_SNAKE_CASE = 50 ) -> int:
lowercase__ = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
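# Sanity-check sketch of the same recurrence with readable names, against the
# known count for a row of length 7 (Project Euler 114 states 17 ways):
def count_fill_ways_sketch(length):
    ways = [1] * (length + 1)
    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            # blocks that end strictly before the right edge leave one
            # mandatory gap plus a shorter row to fill
            for block_start in range(row_length - block_length):
                ways[row_length] += ways[row_length - block_start - block_length - 1]
            ways[row_length] += 1  # block flush against the right edge
    return ways[length]
assert count_fill_ways_sketch(7) == 17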
| 45
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase_ = get_tests_dir("""fixtures""")
lowercase_ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowercase_ = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> str:
"""simple docstring"""
lowercase__ = 0
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(a , a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[str]:
"""simple docstring"""
lowercase__ = AutoFeatureExtractor.from_pretrained(a )
self.assertIsInstance(a , a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__ = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
lowercase__ = AutoFeatureExtractor.from_pretrained(a ).to_dict()
config_dict.pop('feature_extractor_type' )
lowercase__ = WavaVecaFeatureExtractor(**a )
# save in new folder
model_config.save_pretrained(a )
config.save_pretrained(a )
lowercase__ = AutoFeatureExtractor.from_pretrained(a )
# make sure private variable is not incorrectly saved
lowercase__ = json.loads(config.to_json_string() )
self.assertTrue('_processor_class' not in dict_as_saved )
self.assertIsInstance(a , a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = AutoFeatureExtractor.from_pretrained(a )
self.assertIsInstance(a , a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Tuple:
"""simple docstring"""
with self.assertRaisesRegex(
a , 'bert-base is not a local folder and is not a valid model identifier' ):
lowercase__ = AutoFeatureExtractor.from_pretrained('bert-base' )
def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict:
"""simple docstring"""
with self.assertRaisesRegex(
a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
lowercase__ = AutoFeatureExtractor.from_pretrained(a , revision='aaaaaa' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Any:
"""simple docstring"""
with self.assertRaisesRegex(
a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
lowercase__ = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[str]:
"""simple docstring"""
with self.assertRaises(a ):
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a )
lowercase__ = AutoFeatureExtractor.from_pretrained(a , trust_remote_code=a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Dict:
"""simple docstring"""
try:
AutoConfig.register('custom' , a )
AutoFeatureExtractor.register(a , a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(a ):
AutoFeatureExtractor.register(a , a )
# Now that the config is registered, it can be used as any other config with the auto-API
lowercase__ = CustomFeatureExtractor.from_pretrained(a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(a )
lowercase__ = AutoFeatureExtractor.from_pretrained(a )
self.assertIsInstance(a , a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = True
try:
AutoConfig.register('custom' , a )
AutoFeatureExtractor.register(a , a )
# If remote code is not set, the default is to use local
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
lowercase__ = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
self.assertTrue(not hasattr(a , 'is_local' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
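# Sketch of the registration pattern exercised by the tests above (assumed
# minimal usage, not a complete example):
#   AutoConfig.register("custom", CustomConfig)
#   AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
# after which AutoFeatureExtractor.from_pretrained(<saved dir>) resolves to
# CustomFeatureExtractor for configs whose model_type is "custom".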
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""b0""": efficientnet.EfficientNetBa,
"""b1""": efficientnet.EfficientNetBa,
"""b2""": efficientnet.EfficientNetBa,
"""b3""": efficientnet.EfficientNetBa,
"""b4""": efficientnet.EfficientNetBa,
"""b5""": efficientnet.EfficientNetBa,
"""b6""": efficientnet.EfficientNetBa,
"""b7""": efficientnet.EfficientNetBa,
}
lowercase_ = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = EfficientNetConfig()
lowercase__ = CONFIG_MAP[model_name]['hidden_dim']
lowercase__ = CONFIG_MAP[model_name]['width_coef']
lowercase__ = CONFIG_MAP[model_name]['depth_coef']
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = CONFIG_MAP[model_name]['dropout_rate']
lowercase__ = CONFIG_MAP[model_name]['dw_padding']
lowercase__ = 'huggingface/label-files'
lowercase__ = 'imagenet-1k-id2label.json'
lowercase__ = 1000
lowercase__ = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase__ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase__ = idalabel
lowercase__ = {v: k for k, v in idalabel.items()}
return config
def __UpperCamelCase () -> Tuple:
lowercase__ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowercase__ = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = EfficientNetImageProcessor(
size={'height': size, 'width': size} , image_mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , image_std=[0.4_7_8_5_3_9_4_4, 0.4_7_3_2_8_6_4, 0.4_7_4_3_4_1_6_3] , do_center_crop=_SCREAMING_SNAKE_CASE , )
return preprocessor
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = [v.split('_' )[0].split('block' )[1] for v in original_param_names if v.startswith('block' )]
lowercase__ = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowercase__ = len(_SCREAMING_SNAKE_CASE )
lowercase__ = {b: str(_SCREAMING_SNAKE_CASE ) for b, i in zip(_SCREAMING_SNAKE_CASE , range(_SCREAMING_SNAKE_CASE ) )}
lowercase__ = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
lowercase__ = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
lowercase__ = {}
for item in rename_keys:
if item[0] in original_param_names:
lowercase__ = 'efficientnet.' + item[1]
lowercase__ = 'classifier.weight'
lowercase__ = 'classifier.bias'
return key_mapping
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for key, value in tf_params.items():
if "normalization" in key:
continue
lowercase__ = key_mapping[key]
if "_conv" in key and "kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(3 , 2 , 0 , 1 )
elif "depthwise_kernel" in key:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE ).permute(2 , 3 , 0 , 1 )
elif "kernel" in key:
lowercase__ = torch.from_numpy(np.transpose(_SCREAMING_SNAKE_CASE ) )
else:
lowercase__ = torch.from_numpy(_SCREAMING_SNAKE_CASE )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(_SCREAMING_SNAKE_CASE )
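# Sketch (assumed shapes) of the layout change applied above: TF stores conv
# kernels as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W),
# hence the permute(3, 2, 0, 1):
_tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)  # H, W, C_in, C_out
_pt_kernel = torch.from_numpy(_tf_kernel).permute(3, 2, 0, 1)  # C_out, C_in, H, W
assert tuple(_pt_kernel.shape) == (32, 16, 3, 3)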
@torch.no_grad()
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
lowercase__ = model_classes[model_name](
include_top=_SCREAMING_SNAKE_CASE , weights='imagenet' , input_tensor=_SCREAMING_SNAKE_CASE , input_shape=_SCREAMING_SNAKE_CASE , pooling=_SCREAMING_SNAKE_CASE , classes=1000 , classifier_activation='softmax' , )
lowercase__ = original_model.trainable_variables
lowercase__ = original_model.non_trainable_variables
lowercase__ = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
lowercase__ = param.numpy()
lowercase__ = list(tf_params.keys() )
# Load HuggingFace model
lowercase__ = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowercase__ = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowercase__ = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print('Converting parameters...' )
lowercase__ = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowercase__ = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowercase__ = preprocessor(images=prepare_img() , return_tensors='pt' )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowercase__ = hf_model(**_SCREAMING_SNAKE_CASE )
lowercase__ = outputs.logits.detach().numpy()
# Original model inference
lowercase__ = False
lowercase__ = CONFIG_MAP[model_name]['image_size']
lowercase__ = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST )
lowercase__ = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowercase__ = np.expand_dims(_SCREAMING_SNAKE_CASE , axis=0 )
lowercase__ = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1E-3 ), "The predicted logits are not the same."
print('Model outputs match!' )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(F"""Pushing converted {model_name} to the hub...""" )
lowercase__ = F"""efficientnet-{model_name}"""
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
lowercase_ = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 45
| 0
|
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowercase_ = logging.get_logger(__name__)
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
lowercase__ = RobertaPreLayerNormConfig.from_pretrained(
_SCREAMING_SNAKE_CASE , architectures=['RobertaPreLayerNormForMaskedLM'] )
# convert state_dict
lowercase__ = torch.load(hf_hub_download(repo_id=_SCREAMING_SNAKE_CASE , filename='pytorch_model.bin' ) )
lowercase__ = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith('roberta.' ):
lowercase__ = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ):
continue
lowercase__ = tensor_value
lowercase__ = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=_SCREAMING_SNAKE_CASE , config=_SCREAMING_SNAKE_CASE , state_dict=_SCREAMING_SNAKE_CASE )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
# convert tokenizer
lowercase__ = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase_ = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 721
|
import argparse
import json
import subprocess
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
lowercase__ = []
lowercase__ = (
F"""curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""""
' https://api.github.com/repos/huggingface/transformers/actions/runners'
)
lowercase__ = subprocess.run(_SCREAMING_SNAKE_CASE , shell=_SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE )
lowercase__ = output.stdout.decode('utf-8' )
lowercase__ = json.loads(_SCREAMING_SNAKE_CASE )
lowercase__ = status['runners']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(_SCREAMING_SNAKE_CASE )
# save the result so we can report them on Slack
with open('offline_runners.txt' , 'w' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) )
if len(_SCREAMING_SNAKE_CASE ) > 0:
lowercase__ = '\n'.join([x['name'] for x in offline_runners] )
raise ValueError(F"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
return values.split(',' )
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45
| 0
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = 'Wav2Vec2FeatureExtractor'
_UpperCamelCase : Any = 'AutoTokenizer'
def __init__( self : str , a : Dict , a : List[str] )-> Tuple:
"""simple docstring"""
super().__init__(a , a )
lowercase__ = self.feature_extractor
lowercase__ = False
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , a : List[Any] , **a : int )-> Dict:
"""simple docstring"""
try:
return super().from_pretrained(a , **a )
except OSError:
warnings.warn(
f"""Loading a tokenizer inside {cls.__name__} from a config that does not"""
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' , a , )
lowercase__ = WavaVecaFeatureExtractor.from_pretrained(a , **a )
lowercase__ = WavaVecaCTCTokenizer.from_pretrained(a , **a )
return cls(feature_extractor=a , tokenizer=a )
def __call__( self : Dict , *a : Optional[Any] , **a : Optional[Any] )-> List[Any]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*a , **a )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
lowercase__ = kwargs.pop('raw_speech' )
else:
lowercase__ = kwargs.pop('audio' , a )
lowercase__ = kwargs.pop('sampling_rate' , a )
lowercase__ = kwargs.pop('text' , a )
if len(a ) > 0:
lowercase__ = args[0]
lowercase__ = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
lowercase__ = self.feature_extractor(a , *a , sampling_rate=a , **a )
if text is not None:
lowercase__ = self.tokenizer(a , **a )
if text is None:
return inputs
elif audio is None:
return encodings
else:
lowercase__ = encodings['input_ids']
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any , *a : Tuple , **a : Any )-> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*a , **a )
lowercase__ = kwargs.pop('input_features' , a )
lowercase__ = kwargs.pop('labels' , a )
if len(a ) > 0:
lowercase__ = args[0]
lowercase__ = args[1:]
if input_features is not None:
lowercase__ = self.feature_extractor.pad(a , *a , **a )
if labels is not None:
lowercase__ = self.tokenizer.pad(a , **a )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
lowercase__ = labels['input_ids']
return input_features
def SCREAMING_SNAKE_CASE_ ( self : Any , *a : Optional[Any] , **a : Optional[Any] )-> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , *a : str , **a : str )-> int:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@contextmanager
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Any:
"""simple docstring"""
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call).' )
lowercase__ = True
lowercase__ = self.tokenizer
yield
lowercase__ = self.feature_extractor
lowercase__ = False
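# Sketch of the modern call pattern that the deprecation warning above points
# to (assumed inputs): pass audio and text in a single call instead of using
# `as_target_processor`:
#   batch = processor(audio=speech_array, sampling_rate=16_000, text=transcript)
#   batch["labels"]  # tokenized transcript ids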
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Tuple = 'ClapFeatureExtractor'
_UpperCamelCase : Union[str, Any] = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : List[Any] , a : int , a : str )-> Any:
"""simple docstring"""
super().__init__(a , a )
def __call__( self : Any , a : Tuple=None , a : Optional[int]=None , a : int=None , **a : Optional[int] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = kwargs.pop('sampling_rate' , a )
if text is None and audios is None:
raise ValueError('You have to specify either text or audios. Both cannot be none.' )
if text is not None:
lowercase__ = self.tokenizer(a , return_tensors=a , **a )
if audios is not None:
lowercase__ = self.feature_extractor(
a , sampling_rate=a , return_tensors=a , **a )
if text is not None and audios is not None:
lowercase__ = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def SCREAMING_SNAKE_CASE_ ( self : str , *a : Dict , **a : int )-> Optional[int]:
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , *a : int , **a : Dict )-> Dict:
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.tokenizer.model_input_names
lowercase__ = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
| 45
| 0
|
from __future__ import annotations
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> list[int]:
lowercase__ = 0
lowercase__ = len(_SCREAMING_SNAKE_CASE ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
lowercase__ = i + 1
else:
lowercase__ = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{two_pointer([2, 7, 11, 15], 9) = }''')
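# Note (sketch): the two-pointer scan above assumes `nums` is sorted in
# ascending order; a self-contained version with readable names:
def two_pointer_sketch(nums, target):
    i, j = 0, len(nums) - 1
    while i < j:
        pair_sum = nums[i] + nums[j]
        if pair_sum == target:
            return [i, j]
        if pair_sum < target:
            i += 1  # need a larger sum
        else:
            j -= 1  # need a smaller sum
    return []
assert two_pointer_sketch([2, 7, 11, 15], 9) == [0, 1]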
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowercase_ = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
lowercase_ = {
"""moussaKam/mbarthez""": 1_024,
"""moussaKam/barthez""": 1_024,
"""moussaKam/barthez-orangesum-title""": 1_024,
}
lowercase_ = """▁"""
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Dict = VOCAB_FILES_NAMES
_UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : Optional[Any] = ['input_ids', 'attention_mask']
_UpperCamelCase : int = BarthezTokenizer
def __init__( self : List[Any] , a : Union[str, Any]=None , a : Optional[Any]=None , a : Dict="<s>" , a : Union[str, Any]="</s>" , a : List[str]="</s>" , a : Optional[Any]="<s>" , a : int="<unk>" , a : str="<pad>" , a : Optional[int]="<mask>" , **a : Union[str, Any] , )-> Tuple:
"""simple docstring"""
lowercase__ = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
lowercase__ = vocab_file
lowercase__ = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase__ = [self.cls_token_id]
lowercase__ = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None )-> List[int]:
"""simple docstring"""
lowercase__ = [self.sep_token_id]
lowercase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : str , a : Optional[str] = None )-> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowercase__ = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 45
| 0
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] , a : Collection[float] | None = None )-> None:
"""simple docstring"""
if components is None:
lowercase__ = []
lowercase__ = list(a )
def __len__( self : int )-> int:
"""simple docstring"""
return len(self.__components )
def __str__( self : Dict )-> str:
"""simple docstring"""
return "(" + ",".join(map(a , self.__components ) ) + ")"
def __add__( self : str , a : Vector )-> Vector:
"""simple docstring"""
lowercase__ = len(self )
if size == len(a ):
lowercase__ = [self.__components[i] + other.component(a ) for i in range(a )]
return Vector(a )
else:
raise Exception('must have the same size' )
def __sub__( self : List[Any] , a : Vector )-> Vector:
"""simple docstring"""
lowercase__ = len(self )
if size == len(a ):
lowercase__ = [self.__components[i] - other.component(a ) for i in range(a )]
return Vector(a )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self : List[str] , a : float )-> Vector:
"""simple docstring"""
...
@overload
def __mul__( self : Any , a : Vector )-> float:
"""simple docstring"""
...
def __mul__( self : Optional[Any] , a : float | Vector )-> float | Vector:
"""simple docstring"""
if isinstance(a , (float, int) ):
lowercase__ = [c * other for c in self.__components]
return Vector(a )
elif isinstance(a , a ) and len(self ) == len(a ):
lowercase__ = len(self )
lowercase__ = [self.__components[i] * other.component(a ) for i in range(a )]
return sum(a )
else: # error case
raise Exception('invalid operand!' )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Vector:
"""simple docstring"""
return Vector(self.__components )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : int )-> float:
"""simple docstring"""
if isinstance(a , a ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : int , a : float )-> None:
"""simple docstring"""
assert -len(self.__components ) <= pos < len(self.__components )
lowercase__ = value
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> float:
"""simple docstring"""
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
lowercase__ = [c**2 for c in self.__components]
return math.sqrt(sum(a ) )
def SCREAMING_SNAKE_CASE_ ( self : str , a : Vector , a : bool = False )-> float:
"""simple docstring"""
lowercase__ = self * other
lowercase__ = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Vector:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return Vector([0] * dimension )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Vector:
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and (isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ))
lowercase__ = [0] * dimension
lowercase__ = 1
return Vector(_SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Vector:
assert (
isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
and (isinstance(_SCREAMING_SNAKE_CASE , (int, float) ))
)
return x * scalar + y
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Vector:
random.seed(_SCREAMING_SNAKE_CASE )
lowercase__ = [random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )]
return Vector(_SCREAMING_SNAKE_CASE )
class SCREAMING_SNAKE_CASE :
def __init__( self : str , a : list[list[float]] , a : int , a : int )-> None:
"""simple docstring"""
lowercase__ = matrix
lowercase__ = w
lowercase__ = h
def __str__( self : List[Any] )-> str:
"""simple docstring"""
lowercase__ = ''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , a : Matrix )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowercase__ = []
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] + other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self : Optional[Any] , a : Matrix )-> Matrix:
"""simple docstring"""
if self.__width == other.width() and self.__height == other.height():
lowercase__ = []
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] - other.component(a , a )
for j in range(self.__width )
]
matrix.append(a )
return Matrix(a , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self : Dict , a : float )-> Matrix:
"""simple docstring"""
...
@overload
def __mul__( self : str , a : Vector )-> Vector:
"""simple docstring"""
...
def __mul__( self : int , a : float | Vector )-> Vector | Matrix:
"""simple docstring"""
if isinstance(a , a ): # matrix-vector
if len(a ) == self.__width:
lowercase__ = zero_vector(self.__height )
for i in range(self.__height ):
lowercase__ = [
self.__matrix[i][j] * other.component(a )
for j in range(self.__width )
]
ans.change_component(a , sum(a ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(a , (int, float) ): # matrix-scalar
lowercase__ = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(a , self.__width , self.__height )
return None
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
return self.__height
def SCREAMING_SNAKE_CASE_ ( self : int )-> int:
"""simple docstring"""
return self.__width
def SCREAMING_SNAKE_CASE_ ( self : str , a : int , a : int )-> float:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : int , a : int , a : float )-> None:
"""simple docstring"""
if 0 <= x < self.__height and 0 <= y < self.__width:
lowercase__ = value
else:
raise Exception('change_component: indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : Any , a : int , a : int )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
lowercase__ = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(a ) ):
lowercase__ = minor[i][:y] + minor[i][y + 1 :]
return Matrix(a , self.__width - 1 , self.__height - 1 ).determinant()
def SCREAMING_SNAKE_CASE_ ( self : Dict , a : int , a : int )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(a , a )
else:
raise Exception('Indices out of bounds' )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> float:
"""simple docstring"""
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowercase__ = [
self.__matrix[0][y] * self.cofactor(0 , a ) for y in range(self.__width )
]
return sum(a )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> Matrix:
lowercase__ = [[0] * n for _ in range(_SCREAMING_SNAKE_CASE )]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Matrix:
random.seed(_SCREAMING_SNAKE_CASE )
lowercase__ = [
[random.randint(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for _ in range(_SCREAMING_SNAKE_CASE )] for _ in range(_SCREAMING_SNAKE_CASE )
]
return Matrix(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
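# Self-contained sketch of the Laplace (cofactor) expansion used by the
# determinant method above (illustrative, independent of the class API):
def _det_sketch(m):
    n = len(m)
    if n == 1:
        return m[0][0]
    total = 0.0
    for y in range(n):
        minor = [row[:y] + row[y + 1:] for row in m[1:]]  # drop row 0 and column y
        total += (-1) ** y * m[0][y] * _det_sketch(minor)
    return total
assert _det_sketch([[1, 2], [3, 4]]) == -2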
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = StableDiffusionSAGPipeline
_UpperCamelCase : str = TEXT_TO_IMAGE_PARAMS
_UpperCamelCase : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCamelCase : str = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[int] = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
torch.manual_seed(0 )
lowercase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
lowercase__ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
torch.manual_seed(0 )
lowercase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
lowercase__ = CLIPTextModel(a )
lowercase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowercase__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[int] , a : Any=0 )-> Union[str, Any]:
"""simple docstring"""
if str(a ).startswith('mps' ):
lowercase__ = torch.manual_seed(a )
else:
lowercase__ = torch.Generator(device=a ).manual_seed(a )
lowercase__ = {
'prompt': '.',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 1.0,
'sag_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('CompVis/stable-diffusion-v1-4' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' )
lowercase__ = output.images
lowercase__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-2
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = StableDiffusionSAGPipeline.from_pretrained('stabilityai/stable-diffusion-2-1-base' )
lowercase__ = sag_pipe.to(a )
sag_pipe.set_progress_bar_config(disable=a )
lowercase__ = '.'
lowercase__ = torch.manual_seed(0 )
lowercase__ = sag_pipe(
[prompt] , width=768 , height=512 , generator=a , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type='np' , )
lowercase__ = output.images
assert image.shape == (1, 512, 768, 3)
| 45
| 0
|
from scipy.stats import spearmanr
import datasets
lowercase_ = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
lowercase_ = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
lowercase_ = R"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE (datasets.Metric ):
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Tuple:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : int , a : str , a : Any , a : str=False )-> Optional[int]:
"""simple docstring"""
lowercase__ = spearmanr(a , a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
| 703
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Any = 'deit'
def __init__( self : Any , a : Union[str, Any]=768 , a : Optional[Any]=12 , a : Union[str, Any]=12 , a : Optional[int]=3_072 , a : Optional[int]="gelu" , a : Optional[Any]=0.0 , a : List[Any]=0.0 , a : int=0.02 , a : List[str]=1E-1_2 , a : Optional[int]=224 , a : Tuple=16 , a : List[Any]=3 , a : List[str]=True , a : Any=16 , **a : Union[str, Any] , )-> int:
"""simple docstring"""
super().__init__(**a )
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = initializer_range
lowercase__ = layer_norm_eps
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = qkv_bias
lowercase__ = encoder_stride
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : List[Any] = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE_ ( self : int )-> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> float:
"""simple docstring"""
return 1E-4
| 45
| 0
|
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece_with_bytefallback.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = GPTSwaTokenizer
_UpperCamelCase : str = False
_UpperCamelCase : List[str] = True
_UpperCamelCase : List[str] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
lowercase__ = GPTSwaTokenizer(a , eos_token='<unk>' , bos_token='<unk>' , pad_token='<unk>' )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str )-> Dict:
"""simple docstring"""
lowercase__ = 'This is a test'
lowercase__ = 'This is a test'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : str )-> int:
"""simple docstring"""
lowercase__ = '<s>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Dict:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(a ) , 2_000 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 2_000 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> str:
"""simple docstring"""
lowercase__ = GPTSwaTokenizer(a )
lowercase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , [465, 287, 265, 631, 842] )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
# fmt: off
self.assertListEqual(
a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] , )
# fmt: on
lowercase__ = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260] , )
lowercase__ = tokenizer.convert_ids_to_tokens(a )
# fmt: off
self.assertListEqual(
a , ['▁I', '▁was', '▁bor', 'n', '▁in', '▁', '<0x39>', '2', '0', '0', '0', ',', '▁and', '▁this', '▁is', '▁f', 'al', 's', '<0xC3>', '<0xA9>', '.'] )
# fmt: on
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = GPTSwaTokenizer(a )
lowercase__ = ['This is a test', 'I was born in 92000, and this is falsé.']
lowercase__ = [
[465, 287, 265, 631, 842],
[262, 272, 1_525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(a , a ):
self.assertListEqual(tokenizer.encode_fast(a ) , a )
# Test that decode_fast returns the input text
for text, token_ids in zip(a , a ):
self.assertEqual(tokenizer.decode_fast(a ) , a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
lowercase__ = [
'<|python|>def fibonacci(n)\n if n < 0:\n print(\'Incorrect input\')',
'Hey there, how are you doing this fine day?',
'This is a text with a trailing spaces followed by a dot .',
'Häj sväjs lillebrör! =)',
'Det är inget fel på Mr. Cool',
]
# fmt: off
lowercase__ = {'input_ids': [[63_423, 5, 6_811, 14_954, 282, 816, 3_821, 63_466, 63_425, 63_462, 18, 63_978, 678, 301, 1_320, 63_423, 63_455, 63_458, 18, 63_982, 4_246, 3_940, 1_901, 47_789, 5_547, 18_994], [19_630, 1_100, 63_446, 1_342, 633, 544, 4_488, 593, 5_102, 2_416, 63_495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_652, 428, 268, 1_936, 515, 268, 58_593, 22_413, 9_106, 546, 268, 33_213, 63_979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55_130, 63_450, 924, 63_449, 2_249, 4_062, 1_558, 318, 63_504, 21_498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2_827, 2_559, 332, 6_575, 63_443, 26_801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='AI-Sweden/gpt-sw3-126m' , sequences=a , )
| 704
|
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"
    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"
    result = requests.get(url, headers=headers).json()
    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # NB: `worflow_run_id` (sic) is the keyword argument exposed by the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Return the text content of each file inside the downloaded artifact zips."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")
    return results
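# A minimal usage sketch (the artifact name, output directory and token below are
# placeholders, not part of the original script):
#   reports = get_last_daily_ci_reports(
#       artifact_names=["run_all_tests_gpu_test_reports"],
#       output_dir="ci_artifacts",
#       token=os.environ.get("GITHUB_TOKEN"),
#   )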
| 45
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
_UpperCamelCase : Optional[Any] = ['input_features', 'attention_mask']
def __init__( self : Tuple , a : List[str]=80 , a : List[Any]=16_000 , a : Any=80 , a : str=0.0 , a : List[Any]=True , a : List[str]=True , a : int=True , **a : str , )-> int:
"""simple docstring"""
super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a )
lowercase__ = num_mel_bins
lowercase__ = do_ceptral_normalize
lowercase__ = normalize_means
lowercase__ = normalize_vars
lowercase__ = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : np.ndarray , )-> np.ndarray:
"""simple docstring"""
lowercase__ = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
lowercase__ = torch.from_numpy(a ).unsqueeze(0 )
lowercase__ = ta_kaldi.fbank(a , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate )
return features.numpy()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( a : np.ndarray , a : int , a : Optional[bool] = True , a : Optional[bool] = True , a : float = 0.0 , )-> np.ndarray:
"""simple docstring"""
if normalize_means:
lowercase__ = x[:input_length].mean(axis=0 )
lowercase__ = np.subtract(a , a )
if normalize_vars:
lowercase__ = x[:input_length].std(axis=0 )
lowercase__ = np.divide(a , a )
if input_length < x.shape[0]:
lowercase__ = padding_value
# make sure array is in float32
lowercase__ = x.astype(np.floataa )
return x
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : List[np.ndarray] , a : Optional[np.ndarray] = None )-> List[np.ndarray]:
"""simple docstring"""
lowercase__ = attention_mask.sum(-1 ) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(a , a , self.normalize_means , self.normalize_vars , self.padding_value )
for x, n in zip(a , a )
]
def __call__( self : Tuple , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Union[bool, str, PaddingStrategy] = False , a : Optional[int] = None , a : bool = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , a : Optional[bool] = None , **a : List[Any] , )-> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowercase__ = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase__ = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase__ = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowercase__ = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase__ = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase__ = [raw_speech]
# extract fbank features
lowercase__ = [self._extract_fbank_features(a ) for waveform in raw_speech]
# convert into correct format for padding
lowercase__ = BatchFeature({'input_features': features} )
lowercase__ = self.pad(
a , padding=a , max_length=a , truncation=a , pad_to_multiple_of=a , return_attention_mask=a , **a , )
# make sure list is in array format
lowercase__ = padded_inputs.get('input_features' )
if isinstance(input_features[0] , a ):
lowercase__ = [np.asarray(a , dtype=np.floataa ) for feature in input_features]
lowercase__ = padded_inputs.get('attention_mask' )
if attention_mask is not None:
lowercase__ = [np.asarray(a , dtype=np.intaa ) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
lowercase__ = (
np.array(a , dtype=np.intaa )
if self._get_padding_strategies(a , max_length=a ) is not PaddingStrategy.DO_NOT_PAD
else None
)
lowercase__ = self.normalize(
padded_inputs['input_features'] , attention_mask=a )
if return_tensors is not None:
lowercase__ = padded_inputs.convert_to_tensors(a )
return padded_inputs
| 705
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
lowercase_ = False
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a )
lowercase__ = VersatileDiffusionPipeline.from_pretrained(a , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = generator.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt='first prompt' , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowercase__ = 'cyberpunk 2077'
lowercase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.dual_guided(
prompt=a , image=a , text_to_image_strength=0.75 , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = 'A painting of a squirrel eating a burger '
lowercase__ = torch.manual_seed(0 )
lowercase__ = pipe.text_to_image(
prompt=a , generator=a , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
lowercase__ = pipe.image_variation(a , generator=a , output_type='numpy' ).images
lowercase__ = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 45
| 0
|
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Prints a maximum-size set of mutually compatible activities (greedy selection)."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
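# Worked example: with start = [1, 3, 0, 5, 8, 5] and finish = [2, 4, 6, 7, 9, 9],
# the greedy scan above selects activities 0, 1, 3 and 4, since each starts no
# earlier than the previously selected activity finishes.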
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 706
|
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
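# Examples: is_arithmetic_series([2, 4, 6]) -> True;
# is_arithmetic_series([3, 6, 12, 24]) -> False (constant ratio, not difference);
# arithmetic_mean([2, 4, 6]) -> 4.0.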
if __name__ == "__main__":
import doctest
doctest.testmod()
| 45
| 0
|
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
lowercase_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Dict , *a : Dict , **a : int )-> None:
"""simple docstring"""
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 707
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
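# The polyline approximation converges to the true arc length as `steps` grows;
# the loop above shows the estimate stabilising as the step count is multiplied by ten.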
| 45
| 0
|
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = """▁"""
lowercase_ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : str = BigBirdTokenizer
_UpperCamelCase : Optional[Any] = BigBirdTokenizerFast
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = True
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> int:
"""simple docstring"""
super().setUp()
lowercase__ = self.tokenizer_class(a , keep_accents=a )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Dict:
"""simple docstring"""
lowercase__ = '<s>'
lowercase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<unk>' )
self.assertEqual(vocab_keys[1] , '<s>' )
self.assertEqual(vocab_keys[-1] , '[MASK]' )
self.assertEqual(len(a ) , 1_004 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Union[str, Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_000 )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Optional[Any]:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowercase__ = self.get_tokenizer()
lowercase__ = self.get_rust_tokenizer()
lowercase__ = 'I was born in 92000, and this is falsé.'
lowercase__ = tokenizer.tokenize(a )
lowercase__ = rust_tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowercase__ = tokenizer.encode(a , add_special_tokens=a )
lowercase__ = rust_tokenizer.encode(a , add_special_tokens=a )
self.assertListEqual(a , a )
lowercase__ = self.get_rust_tokenizer()
lowercase__ = tokenizer.encode(a )
lowercase__ = rust_tokenizer.encode(a )
self.assertListEqual(a , a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> str:
"""simple docstring"""
lowercase__ = BigBirdTokenizer(a , keep_accents=a )
lowercase__ = tokenizer.tokenize('This is a test' )
self.assertListEqual(a , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a ) , [285, 46, 10, 170, 382] , )
lowercase__ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
lowercase__ = tokenizer.convert_tokens_to_ids(a )
self.assertListEqual(
a , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase__ = tokenizer.convert_ids_to_tokens(a )
self.assertListEqual(
a , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Any:
"""simple docstring"""
return BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> Tuple:
"""simple docstring"""
lowercase__ = 'Hello World!'
lowercase__ = [65, 18_536, 2_260, 101, 66]
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str )-> List[Any]:
"""simple docstring"""
lowercase__ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'
)
# fmt: off
lowercase__ = [65, 871, 419, 358, 946, 991, 2_521, 452, 358, 1_357, 387, 7_751, 3_536, 112, 985, 456, 126, 865, 938, 5_400, 5_734, 458, 1_368, 467, 786, 2_462, 5_246, 1_159, 633, 865, 4_519, 457, 582, 852, 2_557, 427, 916, 508, 405, 34_324, 497, 391, 408, 11_342, 1_244, 385, 100, 938, 985, 456, 574, 362, 12_597, 3_200, 3_129, 1_172, 66] # noqa: E231
# fmt: on
self.assertListEqual(a , self.big_tokenizer.encode(a ) )
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Dict:
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowercase__ = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowercase__ = ' '.join(a )
lowercase__ = self.big_tokenizer.encode_plus(a , return_tensors='pt' , return_token_type_ids=a )
lowercase__ = self.big_tokenizer.batch_encode_plus(
[sequence + ' ' + sequence] , return_tensors='pt' , return_token_type_ids=a )
lowercase__ = BigBirdConfig(attention_type='original_full' )
lowercase__ = BigBirdModel(a )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**a )
model(**a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int )-> Dict:
"""simple docstring"""
lowercase__ = BigBirdTokenizer.from_pretrained('google/bigbird-roberta-base' )
lowercase__ = tokenizer.decode(tokenizer('Paris is the [MASK].' ).input_ids )
self.assertTrue(decoded_text == '[CLS] Paris is the[MASK].[SEP]' )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = {'input_ids': [[65, 39_286, 458, 36_335, 2_001, 456, 13_073, 13_266, 455, 113, 7_746, 1_741, 11_157, 391, 13_073, 13_266, 455, 113, 3_967, 35_412, 113, 4_936, 109, 3_870, 2_377, 113, 30_084, 45_720, 458, 134, 17_496, 112, 503, 11_672, 113, 118, 112, 5_665, 13_347, 38_687, 112, 1_496, 31_389, 112, 3_268, 47_264, 134, 962, 112, 16_377, 8_035, 23_130, 430, 12_169, 15_518, 28_592, 458, 146, 41_697, 109, 391, 12_169, 15_518, 16_689, 458, 146, 41_358, 109, 452, 726, 4_034, 111, 763, 35_412, 5_082, 388, 1_903, 111, 9_051, 391, 2_870, 48_918, 1_900, 1_123, 550, 998, 112, 9_586, 15_985, 455, 391, 410, 22_955, 37_636, 114, 66], [65, 448, 17_496, 419, 3_663, 385, 763, 113, 27_533, 2_870, 3_283, 13_043, 1_639, 24_713, 523, 656, 24_013, 18_550, 2_521, 517, 27_014, 21_244, 420, 1_212, 1_465, 391, 927, 4_833, 388, 578, 11_786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2_169, 7_687, 21_932, 18_146, 726, 363, 17_032, 3_391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a , model_name='google/bigbird-roberta-base' , revision='215c99f1600e06f83acce68422f2035b2b5c3510' , )
| 708
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 45
| 0
|
import base64
import io
import json
import os
from copy import deepcopy

from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler


class HfDeepSpeedConfig:
    """Wraps a DeepSpeed config dictionary or JSON file and offers simple query helpers."""

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"
                )
        self.config = config
        self.set_stage_and_offload()

    def set_stage_and_offload(self):
        self._stage = self.get_value("zero_optimization.stage", -1)
        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True

    def find_config_node(self, ds_key_long):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key
        return config, ds_key

    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)

    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config
        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}")
                else:
                    return
        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)

    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
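# Example (with a hypothetical config): for config = {"zero_optimization": {"stage": 3}},
# HfDeepSpeedConfig(config).get_value("zero_optimization.stage") returns 3 and
# is_zero3() returns True, since nested keys are addressed with dotted paths.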
class DeepSpeedEngineWrapper:
    """Thin wrapper around the deepspeed engine: `backward` also performs the engine step."""

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        self.engine.backward(loss, **kwargs)
        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    """Placeholder optimizer: signals Accelerate to build the real one from the deepspeed config."""

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    """Placeholder scheduler: signals Accelerate to build the real one from the deepspeed config."""

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
| 709
|
def solution(length: int = 50) -> int:
    """Counts single-colour tilings of a row of `length` units, summed over tile lengths 2, 3 and 4."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )
    return sum(different_colour_ways_number[length])
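# Each column of the table tracks one tile length t in {2, 3, 4}: the recurrence places
# the first coloured tile at every start position and reuses the count for the remaining
# suffix. For length 5 and t = 2 this yields 7 ways, matching Project Euler problem 116's
# worked example.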
if __name__ == "__main__":
print(f'''{solution() = }''')
| 45
| 0
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)
    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)
    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))
    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
lowercase_ = parser.parse_args()
get_runner_status(args.target_runners, args.token)
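# A hypothetical invocation (script name, runner names and token are placeholders):
#   python check_runners.py --target_runners gpu-runner-1,gpu-runner-2 --token $GITHUB_PAT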
| 710
|
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class SCREAMING_SNAKE_CASE (UpperCAmelCase ):
def __init__( self : Optional[Any] , a : UNetaDModel , a : UNetaDModel , a : DDPMScheduler , a : Any , )-> Dict:
"""simple docstring"""
super().__init__()
lowercase__ = value_function
lowercase__ = unet
lowercase__ = scheduler
lowercase__ = env
lowercase__ = env.get_dataset()
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].mean()
except: # noqa: E722
pass
lowercase__ = {}
for key in self.data.keys():
try:
lowercase__ = self.data[key].std()
except: # noqa: E722
pass
lowercase__ = env.observation_space.shape[0]
lowercase__ = env.action_space.shape[0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Any , a : int )-> Dict:
"""simple docstring"""
return (x_in - self.means[key]) / self.stds[key]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , a : str , a : List[str] )-> str:
"""simple docstring"""
return x_in * self.stds[key] + self.means[key]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : Tuple )-> Tuple:
"""simple docstring"""
if type(a ) is dict:
return {k: self.to_torch(a ) for k, v in x_in.items()}
elif torch.is_tensor(a ):
return x_in.to(self.unet.device )
return torch.tensor(a , device=self.unet.device )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , a : Optional[int] , a : Dict , a : Optional[Any] )-> List[Any]:
"""simple docstring"""
for key, val in cond.items():
lowercase__ = val.clone()
return x_in
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : Optional[Any] , a : Any , a : Optional[Any] , a : Optional[int] )-> List[Any]:
"""simple docstring"""
lowercase__ = x.shape[0]
lowercase__ = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
lowercase__ = torch.full((batch_size,) , a , device=self.unet.device , dtype=torch.long )
for _ in range(a ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
lowercase__ = self.value_function(x.permute(0 , 2 , 1 ) , a ).sample
lowercase__ = torch.autograd.grad([y.sum()] , [x] )[0]
lowercase__ = self.scheduler._get_variance(a )
lowercase__ = torch.exp(0.5 * posterior_variance )
lowercase__ = model_std * grad
lowercase__ = 0
lowercase__ = x.detach()
lowercase__ = x + scale * grad
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.unet(x.permute(0 , 2 , 1 ) , a ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
lowercase__ = self.scheduler.step(a , a , a , predict_epsilon=a )['prev_sample']
# apply conditions to the trajectory (set the initial state)
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
return x, y
def __call__( self : Any , a : Tuple , a : int=64 , a : Tuple=32 , a : List[Any]=2 , a : List[str]=0.1 )-> List[Any]:
"""simple docstring"""
lowercase__ = self.normalize(a , 'observations' )
lowercase__ = obs[None].repeat(a , axis=0 )
lowercase__ = {0: self.to_torch(a )}
lowercase__ = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
lowercase__ = randn_tensor(a , device=self.unet.device )
lowercase__ = self.reset_xa(a , a , self.action_dim )
lowercase__ = self.to_torch(a )
# run the diffusion process
lowercase__ , lowercase__ = self.run_diffusion(a , a , a , a )
# sort output trajectories by value
lowercase__ = y.argsort(0 , descending=a ).squeeze()
lowercase__ = x[sorted_idx]
lowercase__ = sorted_values[:, :, : self.action_dim]
lowercase__ = actions.detach().cpu().numpy()
lowercase__ = self.de_normalize(a , key='actions' )
# select the action with the highest value
if y is not None:
lowercase__ = 0
else:
# if we didn't run value guiding, select a random action
lowercase__ = np.random.randint(0 , a )
lowercase__ = denorm_actions[selected_index, 0]
return denorm_actions
| 45
| 0
|
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every channel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
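# Note: for typical 8-bit images, Image.point() evaluates brightness(c) once per possible
# value c (0..255) to build a per-band lookup table, and clips results to the 8-bit range.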
| 45
| 0
|
from __future__ import annotations

from typing import Any


class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consists of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
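# Sherman-Morrison formula: if this matrix holds A^(-1), then
# (A + u v^T)^(-1) = A^(-1) - (A^(-1) u)(v^T A^(-1)) / (1 + v^T A^(-1) u),
# which is exactly what `sherman_morrison` computes above.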
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test2()
| 712
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : Any , a : str , a : List[Any]=7 , a : int=3 , a : int=18 , a : Optional[Any]=30 , a : Optional[int]=400 , a : int=True , a : Tuple=None , a : Optional[Any]=True , a : str=False , a : str=True , a : int=True , a : Tuple=[0.5, 0.5, 0.5] , a : Any=[0.5, 0.5, 0.5] , )-> Optional[int]:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = min_resolution
lowercase__ = max_resolution
lowercase__ = do_resize
lowercase__ = size if size is not None else {'height': 18, 'width': 20}
lowercase__ = do_thumbnail
lowercase__ = do_align_axis
lowercase__ = do_pad
lowercase__ = do_normalize
lowercase__ = image_mean
lowercase__ = image_std
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Optional[Any] = DonutImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE_ ( self : int )-> List[Any]:
"""simple docstring"""
lowercase__ = DonutImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE_ ( self : Any )-> int:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size' ) )
self.assertTrue(hasattr(a , 'do_thumbnail' ) )
self.assertTrue(hasattr(a , 'do_align_long_axis' ) )
self.assertTrue(hasattr(a , 'do_pad' ) )
self.assertTrue(hasattr(a , 'do_normalize' ) )
self.assertTrue(hasattr(a , 'image_mean' ) )
self.assertTrue(hasattr(a , 'image_std' ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 20} )
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
# Previous config had dimensions in (width, height) order
lowercase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'height': 84, 'width': 42} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Dict:
"""simple docstring"""
pass
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[int]:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Tuple:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
@is_flaky()
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Dict:
"""simple docstring"""
lowercase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input
lowercase__ = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
lowercase__ = image_processing(a , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
| 45
| 0
|
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
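# Worked example: coulombs_law(force=0, charge1=3, charge2=5, distance=2000)
# solves for the missing quantity and returns {'force': 33705.0}.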
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713
|
import math
def res(x, y):
    if 0 not in (x, y):
        # We use the relation log10(x^y) = y * log10(x) and compare the logs.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    raise AssertionError("This should never happen")
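# Comparing y * log10(x) instead of computing x**y directly avoids building
# astronomically large numbers: log10 is monotonic, so for positive bases the
# ordering of the logarithms matches the ordering of the powers.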
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
lowercase_ = """Enter the base and the power separated by a comma: """
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
lowercase_ , lowercase_ = map(int, input(prompt).split(""","""))
# We find the log of each number, using the function res(), which takes two
# arguments.
lowercase_ = res(xa, ya)
lowercase_ = res(xa, ya)
# We check for the largest number
if resa > resa:
print("""Largest number is""", xa, """^""", ya)
elif resa > resa:
print("""Largest number is""", xa, """^""", ya)
else:
print("""Both are equal""")
| 45
| 0
|
class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding the current cell
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
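# Example: the grid [[1, 0], [0, 1]] contains a single island, because all eight
# neighbouring directions (including diagonals) are explored by the flood fill.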
| 45
| 0
|
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # two implicants that differ in exactly one bit merge into k
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first pick implicants that are the unique cover of some minterm (essential)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily cover the remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def __UpperCamelCase () -> None:
lowercase__ = int(input('Enter the no. of variables\n' ) )
lowercase__ = [
float(_SCREAMING_SNAKE_CASE )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
lowercase__ = decimal_to_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = check(_SCREAMING_SNAKE_CASE )
print('Prime Implicants are:' )
print(_SCREAMING_SNAKE_CASE )
lowercase__ = prime_implicant_chart(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowercase__ = selection(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
print('Essential Prime Implicants are:' )
print(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
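
# A quick non-interactive sketch of the pipeline above (inputs are illustrative
# and the helper below is never invoked automatically):
def _demo() -> None:
    binary = decimal_to_binary(3, [0, 1, 2, 5])  # ['000', '001', '010', '101']
    pi = check(binary)  # prime implicants, e.g. ['00_', '0_0', '_01']
    chart = prime_implicant_chart(pi, binary)
    print(selection(chart, pi))  # essential cover, e.g. ['0_0', '_01']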
| 715
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}  # '10' -> 'A', ..., '35' -> 'Z'


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # Digits above 9 are written as the letters A-Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
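    # Spot checks beyond the exhaustive loop above (example values chosen here):
    assert decimal_to_any(255, 16) == "FF"
    assert decimal_to_any(8, 2) == "1000"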
| 45
| 0
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
            if letter.lower() in frequencies:
                # Get the amount of times the letter occurs in the message
                occurrences = decrypted_with_shift.lower().count(letter.lower())

                # Get the expected amount of times the letter should appear
                # based on letter frequencies
                expected = frequencies[letter.lower()] * occurrences

                # Complete the chi squared statistic formula
                chi_letter_value = ((occurrences - expected) ** 2) / expected

                # Add the margin of error to the total chi squared statistic
                chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
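
# A minimal usage sketch (message and shift are illustrative choices):
if __name__ == "__main__":
    message = "the quick brown fox jumps over the lazy dog"
    shift = 7
    encrypted = "".join(
        chr((ord(c) - 97 + shift) % 26 + 97) if c.islower() else c for c in message
    )
    best_shift, chi2, decoded = decrypt_caesar_with_chi_squared(encrypted)
    print(best_shift, decoded)  # expected to recover shift 7 and the original text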
| 716
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , a : Any , a : Optional[int]=13 , a : Tuple=30 , a : Union[str, Any]=2 , a : List[str]=3 , a : Dict=True , a : List[str]=True , a : List[Any]=32 , a : List[str]=5 , a : Optional[int]=4 , a : List[str]=37 , a : Dict="gelu" , a : Dict=0.1 , a : List[str]=0.1 , a : int=10 , a : List[str]=0.02 , a : int=None , a : List[str]=2 , )-> Dict:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = image_size
lowercase__ = patch_size
lowercase__ = num_channels
lowercase__ = is_training
lowercase__ = use_labels
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = scope
lowercase__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase__ = (image_size // patch_size) ** 2
lowercase__ = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Any:
"""simple docstring"""
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , a : List[Any] , a : List[str] , a : Dict )-> Optional[Any]:
"""simple docstring"""
lowercase__ = ViTModel(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : int , a : Optional[Any] , a : int , a : Tuple )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ViTForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowercase__ = model(a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForMaskedImageModeling(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , a : List[str] , a : int , a : List[Any] )-> str:
"""simple docstring"""
lowercase__ = self.type_sequence_label_size
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase__ = 1
lowercase__ = ViTForImageClassification(a )
model.to(a )
model.eval()
lowercase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase__ = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE (UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
_UpperCamelCase : Union[str, Any] = (
{'feature-extraction': ViTModel, 'image-classification': ViTForImageClassification}
if is_torch_available()
else {}
)
_UpperCamelCase : int = True
_UpperCamelCase : int = False
_UpperCamelCase : Union[str, Any] = False
_UpperCamelCase : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[str]:
"""simple docstring"""
lowercase__ = ViTModelTester(self )
lowercase__ = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Optional[Any]:
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Optional[int]:
"""simple docstring"""
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(a )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> Tuple:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def SCREAMING_SNAKE_CASE_ ( self : Tuple )-> int:
"""simple docstring"""
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[Any]:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = ViTModel.from_pretrained(a )
self.assertIsNotNone(a )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        r"""
        A small test to make sure that inference works in half precision without any problem.
        """
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 45
| 0
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@parameterized.expand([(None,), ('foo.json',)] )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , a : List[str] )-> Any:
"""simple docstring"""
lowercase__ = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a , config_name=a )
lowercase__ = GenerationConfig.from_pretrained(a , config_name=a )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , a )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Optional[Any]:
"""simple docstring"""
lowercase__ = AutoConfig.from_pretrained('gpt2' )
lowercase__ = GenerationConfig.from_model_config(a )
lowercase__ = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(a , a )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def SCREAMING_SNAKE_CASE_ ( self : str )-> Any:
"""simple docstring"""
lowercase__ = GenerationConfig()
lowercase__ = {
'max_new_tokens': 1_024,
'foo': 'bar',
}
lowercase__ = copy.deepcopy(a )
lowercase__ = generation_config.update(**a )
# update_kwargs was not modified (no side effects)
self.assertEqual(a , a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(a , {'foo': 'bar'} )
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> Tuple:
"""simple docstring"""
lowercase__ = GenerationConfig()
lowercase__ = 'bar'
with tempfile.TemporaryDirectory('test-generation-config' ) as tmp_dir:
generation_config.save_pretrained(a )
lowercase__ = GenerationConfig.from_pretrained(a )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , 'bar' )
lowercase__ = GenerationConfig.from_model_config(a )
assert not hasattr(a , 'foo' ) # no new kwargs should be initialized if from config
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
lowercase__ = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , a )
self.assertEqual(default_config.num_beams , 1 )
lowercase__ = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , a )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(a )
lowercase__ = GenerationConfig.from_pretrained(a , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , a )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple )-> List[str]:
"""simple docstring"""
lowercase__ = TOKEN
HfFolder.save_token(a )
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-generation-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-generation-config-org' )
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any )-> Optional[int]:
"""simple docstring"""
lowercase__ = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('test-generation-config' , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-generation-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='test-generation-config' , push_to_hub=a , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained(f"""{USER}/test-generation-config""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> List[Any]:
"""simple docstring"""
lowercase__ = GenerationConfig(
do_sample=a , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub('valid_org/test-generation-config-org' , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-generation-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
a , repo_id='valid_org/test-generation-config-org' , push_to_hub=a , use_auth_token=self._token )
lowercase__ = GenerationConfig.from_pretrained('valid_org/test-generation-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(a , getattr(a , a ) )
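
# A compact round-trip sketch of what the tests above exercise (values are
# illustrative choices, not fixtures from this file):
if __name__ == "__main__":
    config = GenerationConfig(do_sample=True, temperature=0.7)
    unused_kwargs = config.update(max_new_tokens=64, foo="bar")  # -> {"foo": "bar"}
    with tempfile.TemporaryDirectory() as tmp_dir:
        config.save_pretrained(tmp_dir)
        reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded.temperature == 0.7 and reloaded.max_new_tokens == 64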
| 717
|
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return

    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]

    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3

        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)

        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)

        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(stooge_sort(unsorted))
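    # Complexity note: stooge sort runs in O(n^(log 3 / log 1.5)) ~ O(n^2.71) time.
    # Quick sanity check with an example list chosen here:
    assert stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5]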
| 45
| 0
|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
with open(file_name, """wb""") as fp:
fp.write(download_video(url))
print(f'''Done. Video saved to disk as {file_name}.''')
| 718
|
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{'spearmanr': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric(\"spearmanr\")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results['spearmanr'])
-0.7
>>> print(round(results['spearmanr_pvalue'], 2))
0.19
"""
_CITATION = r"""\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
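
# A direct sketch of the underlying computation, bypassing the datasets wrapper
# (example values reused from the docstring above):
if __name__ == "__main__":
    rho, pvalue = spearmanr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
    print(round(rho, 2), round(pvalue, 2))  # -0.7 0.19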
| 45
| 0
|
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
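
# Usage sketch (repo and file names are illustrative): with a recent hfh this
# builds a Hub resolve URL such as
#   hf_hub_url("squad", "README.md")
#   -> "https://huggingface.co/datasets/squad/resolve/main/README.md"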
| 719
|
def solution(length: int = 50) -> int:
    """
    Count the ways a row of the given length can be filled with red blocks of
    minimum length 3, separated by at least one black unit (Project Euler 114).
    """
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
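    # Sanity check from the Project Euler 114 statement: a row of length seven
    # admits exactly seventeen arrangements.
    assert solution(7) == 17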
| 45
| 0
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 720
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 224,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_280,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 240,
"""dropout_rate""": 0.2,
"""dw_padding""": [16],
},
"""b2""": {
"""hidden_dim""": 1_408,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 260,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 16],
},
"""b3""": {
"""hidden_dim""": 1_536,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 300,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 18],
},
"""b4""": {
"""hidden_dim""": 1_792,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 380,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_048,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 456,
"""dropout_rate""": 0.4,
"""dw_padding""": [13, 27],
},
"""b6""": {
"""hidden_dim""": 2_304,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 528,
"""dropout_rate""": 0.5,
"""dw_padding""": [31],
},
"""b7""": {
"""hidden_dim""": 2_560,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 600,
"""dropout_rate""": 0.5,
"""dw_padding""": [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(('stem_conv/kernel:0', 'embeddings.convolution.weight') )
rename_keys.append(('stem_bn/gamma:0', 'embeddings.batchnorm.weight') )
rename_keys.append(('stem_bn/beta:0', 'embeddings.batchnorm.bias') )
rename_keys.append(('stem_bn/moving_mean:0', 'embeddings.batchnorm.running_mean') )
rename_keys.append(('stem_bn/moving_variance:0', 'embeddings.batchnorm.running_var') )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"""block{b}_expand_conv/kernel:0""", F"""encoder.blocks.{hf_b}.expansion.expand_conv.weight""") )
rename_keys.append((F"""block{b}_expand_bn/gamma:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.weight""") )
rename_keys.append((F"""block{b}_expand_bn/beta:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.bias""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_expand_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.expansion.expand_bn.running_var""") )
rename_keys.append(
(F"""block{b}_dwconv/depthwise_kernel:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight""") )
rename_keys.append((F"""block{b}_bn/gamma:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight""") )
rename_keys.append((F"""block{b}_bn/beta:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias""") )
rename_keys.append(
(F"""block{b}_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean""") )
rename_keys.append(
(F"""block{b}_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var""") )
rename_keys.append((F"""block{b}_se_reduce/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.weight""") )
rename_keys.append((F"""block{b}_se_reduce/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.reduce.bias""") )
rename_keys.append((F"""block{b}_se_expand/kernel:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.weight""") )
rename_keys.append((F"""block{b}_se_expand/bias:0""", F"""encoder.blocks.{hf_b}.squeeze_excite.expand.bias""") )
rename_keys.append(
(F"""block{b}_project_conv/kernel:0""", F"""encoder.blocks.{hf_b}.projection.project_conv.weight""") )
rename_keys.append((F"""block{b}_project_bn/gamma:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.weight""") )
rename_keys.append((F"""block{b}_project_bn/beta:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.bias""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_mean:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_mean""") )
rename_keys.append(
(F"""block{b}_project_bn/moving_variance:0""", F"""encoder.blocks.{hf_b}.projection.project_bn.running_var""") )
rename_keys.append(('top_conv/kernel:0', 'encoder.top_conv.weight') )
rename_keys.append(('top_bn/gamma:0', 'encoder.top_bn.weight') )
rename_keys.append(('top_bn/beta:0', 'encoder.top_bn.bias') )
rename_keys.append(('top_bn/moving_mean:0', 'encoder.top_bn.running_mean') )
rename_keys.append(('top_bn/moving_variance:0', 'encoder.top_bn.running_var') )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # The TF classification head lives under the "predictions" layer
    # (assumed name of the Keras EfficientNet top layer).
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
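
# Why the permutes above: TF stores regular conv kernels as (H, W, in, out) and
# depthwise kernels as (H, W, channels, multiplier), while PyTorch expects
# (out, in, H, W). A shape-only sketch (dimensions are illustrative):
#
#   tf_kernel = np.zeros((3, 3, 32, 64))                         # HWIO
#   pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # (64, 32, 3, 3)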
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
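
    # Example invocation (script name and flags shown are illustrative):
    #   python convert_efficientnet_to_pytorch.py --model_name b0 --pytorch_dump_folder_path hf_model --save_model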
| 45
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/xglm-564M""": """https://huggingface.co/facebook/xglm-564M/resolve/main/config.json""",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
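
# A small instantiation sketch (the overrides are illustrative):
if __name__ == "__main__":
    config = XGLMConfig(num_layers=2, d_model=128, ffn_dim=512)
    print(config.hidden_size, config.num_hidden_layers)  # 128 2, via attribute_map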
| 721
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--target_runners""",
default=None,
type=list_str,
required=True,
help="""Comma-separated list of runners to check status.""",
)
parser.add_argument(
"""--token""", default=None, type=str, required=True, help="""A token that has actions:read permission."""
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 45
| 0
|
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
if __name__ == "__main__":
print(solution(int(input().strip())))
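    # Project Euler 31: solution(200) == 73682 ways to make £2 from the eight coins.
    # Quick sanity check with a small amount chosen here (5p can be made 4 ways):
    assert solution(5) == 4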
| 700
|
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)

        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )

        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to RobertaTokenizerFast's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to RobertaTokenizerFast's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
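
# A usage sketch (checkpoint name and waveform are illustrative):
#
#   from transformers import ClapProcessor
#   import numpy as np
#
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   inputs = processor(
#       text=["a dog barking"],
#       audios=[np.zeros(48_000, dtype=np.float32)],
#       sampling_rate=48_000,
#       return_tensors="pt",
#   )
#   # -> input_ids / attention_mask from the tokenizer plus input_features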
| 45
| 0
|
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(""".""")
def get_module_path(test_file):
    """Return the module path of a model test file."""
    components = test_file.split(os.path.sep)
    if components[0:2] != ["tests", "models"]:
        raise ValueError(
            "`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
            f"{test_file} instead."
        )
    test_fn = components[-1]
    if not test_fn.endswith("py"):
        raise ValueError(f"`test_file` should be a python file. Got {test_fn} instead.")
    if not test_fn.startswith("test_modeling_"):
        raise ValueError(
            f"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead."
        )

    components = components[:-1] + [test_fn.replace(".py", "")]
    test_module_path = ".".join(components)

    return test_module_path


def get_test_module(test_file):
    """Get the module of a model test file."""
    test_module_path = get_module_path(test_file)
    test_module = importlib.import_module(test_module_path)

    return test_module


def get_tester_classes(test_file):
    """Get all classes in a model test file whose names end with `ModelTester`."""
    tester_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        if attr.endswith("ModelTester"):
            tester_classes.append(getattr(test_module, attr))

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_classes(test_file):
    """Get all model test classes in a model test file with a non-empty `all_model_classes`."""
    test_classes = []
    test_module = get_test_module(test_file)
    for attr in dir(test_module):
        attr_value = getattr(test_module, attr)
        # (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
        # `all_model_classes` is not empty (which also excludes other special classes).
        model_classes = getattr(attr_value, "all_model_classes", [])
        if len(model_classes) > 0:
            test_classes.append(attr_value)

    # sort with class names
    return sorted(test_classes, key=lambda x: x.__name__)


def get_model_classes(test_file):
    """Get all model classes that appear in `all_model_classes` attributes in a model test file."""
    test_classes = get_test_classes(test_file)
    model_classes = set()
    for test_class in test_classes:
        model_classes.update(test_class.all_model_classes)

    # sort with class names
    return sorted(model_classes, key=lambda x: x.__name__)


def get_model_tester_from_test_class(test_class):
    """Get the model tester class of a model test class."""
    test = test_class()
    if hasattr(test, "setUp"):
        test.setUp()

    model_tester = None
    if hasattr(test, "model_tester"):
        # `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
        if test.model_tester is not None:
            model_tester = test.model_tester.__class__

    return model_tester


def get_test_classes_for_model(test_file, model_class):
    """Get all test classes in `test_file` that have `model_class` in their `all_model_classes`."""
    test_classes = get_test_classes(test_file)

    target_test_classes = []
    for test_class in test_classes:
        if model_class in test_class.all_model_classes:
            target_test_classes.append(test_class)

    # sort with class names
    return sorted(target_test_classes, key=lambda x: x.__name__)


def get_tester_classes_for_model(test_file, model_class):
    """Get all model tester classes in `test_file` that are associated to `model_class`."""
    test_classes = get_test_classes_for_model(test_file, model_class)

    tester_classes = []
    for test_class in test_classes:
        tester_class = get_model_tester_from_test_class(test_class)
        if tester_class is not None:
            tester_classes.append(tester_class)

    # sort with class names
    return sorted(tester_classes, key=lambda x: x.__name__)


def get_test_to_tester_mapping(test_file):
    """Get a mapping from model test classes to model tester classes in a model test file."""
    test_classes = get_test_classes(test_file)
    test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class) for test_class in test_classes}
    return test_tester_mapping


def get_model_to_test_mapping(test_file):
    """Get a mapping from model classes to test classes in a model test file."""
    model_classes = get_model_classes(test_file)
    model_test_mapping = {
        model_class: get_test_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_test_mapping


def get_model_to_tester_mapping(test_file):
    """Get a mapping from model classes to model tester classes in a model test file."""
    model_classes = get_model_classes(test_file)
    model_to_tester_mapping = {
        model_class: get_tester_classes_for_model(test_file, model_class) for model_class in model_classes
    }
    return model_to_tester_mapping


def to_json(o):
    """Make class information succinct: serialize classes by name instead of
    their full repr when dumping the mappings above to JSON."""
    if isinstance(o, str):
        return o
    elif isinstance(o, type):
        return o.__name__
    elif isinstance(o, (list, tuple)):
        return [to_json(x) for x in o]
    elif isinstance(o, dict):
        return {to_json(k): to_json(v) for k, v in o.items()}
    else:
        return o
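
# Usage sketch (the test file path is an illustrative example):
#
#   bert_test_file = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
#   print(to_json(get_model_to_tester_mapping(bert_test_file)))
#   # -> {"BertModel": ["BertModelTester"], ...}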
| 701
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
    BarthezTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""",
"""moussaKam/barthez-orangesum-title""": (
"""https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}

SPIECE_UNDERLINE = "▁"
class BarthezTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """
        Build model inputs from a sequence or a pair of sequences by concatenating and adding special tokens. A
        BARThez sequence has the following format:

        - single sequence: `<s> X </s>`
        - pair of sequences: `<s> A </s></s> B </s>`
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """Create a mask of zeros from the sequences passed: BARThez does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
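
# Special-token layout sketch (token ids below are placeholders):
#
#   tokenizer.build_inputs_with_special_tokens([5, 6])          # <s> A </s>
#   tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])  # <s> A </s></s> B </s>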
| 45
| 0
|
import torch
def main() -> None:
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0

    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 702
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

        assert image.shape == (1, 512, 768, 3)
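
# Minimal end-to-end sketch of what the slow tests above exercise -- assumes a
# CUDA device and that the "CompVis/stable-diffusion-v1-4" weights can be fetched.
import torch
from diffusers import StableDiffusionSAGPipeline

pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
pipe = pipe.to("cuda")
image = pipe(
    "a photo of an astronaut riding a horse",
    sag_scale=0.75,  # self-attention guidance strength; 0.0 disables SAG
    guidance_scale=7.5,
    num_inference_steps=20,
).images[0]
image.save("sag_sample.png")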
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    # Tokenize one example and record its characters-per-token ratio.
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
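
# Quick sanity check for the character-per-token ratio computed above -- a
# minimal sketch, assuming the public "gpt2" checkpoint is reachable; the code
# sample is illustrative only.
from transformers import AutoTokenizer

_tok = AutoTokenizer.from_pretrained("gpt2")
_sample = "def add(a, b):\n    return a + b\n"
_ids = _tok(_sample)["input_ids"]
print(f"{len(_sample) / len(_ids):.2f} characters per token")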
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}


class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # ONNX export consumes a single 4D pixel_values tensor.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
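
# Usage sketch for the two classes above -- the image_size override is
# illustrative only; defaults mirror ViT-base.
config = DeiTConfig(image_size=384)
onnx_config = DeiTOnnxConfig(config)
print(config.model_type, list(onnx_config.inputs))  # -> deit ['pixel_values']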
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k), compute with the smaller k
    if k > (n - k):
        k = n - k
    # Calculate C(n, k) iteratively, staying in integer arithmetic
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    """Number of binary search trees on `node_count` (unlabeled) nodes."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    """Number of binary trees on `node_count` labeled nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
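
# Worked check of the closed forms above: C(10, 5) = 252, so Catalan(5) = 252 // 6 = 42
# binary search trees on 5 nodes, and 42 * 5! = 5040 labeled binary trees.
assert binomial_coefficient(10, 5) == 252
assert catalan_number(5) == 42
assert binary_tree_count(5) == 5040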
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
import os
import zipfile
import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links
def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI on the `main` branch."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    # The id of a workflow (not of a workflow run)
    workflow_id = "636036"

    url = f"https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs"
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f"?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}"

    result = requests.get(url, headers=headers).json()

    return result["workflow_runs"]


def get_last_daily_ci_runs(token):
    """Get the id of the last completed run of the scheduled (daily) CI."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run["status"] == "completed":
            workflow_run_id = workflow_run["id"]
            break

    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the named artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        # `worflow_run_id` (sic) matches the keyword expected by the imported helper
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token
                )


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download the artifacts, then return their decoded file contents keyed by artifact name."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)

    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f"{artifact_name}.zip")
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode("UTF-8")

    return results
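
# Hypothetical invocation of the helpers above -- the artifact name, output
# directory, and environment variable are placeholders, not values taken from
# the real CI configuration:
if __name__ == "__main__":
    reports = get_last_daily_ci_reports(
        artifact_names=["test_reports"],  # placeholder artifact name
        output_dir="ci_artifacts",
        token=os.environ.get("GITHUB_TOKEN"),
    )
    for name, files in reports.items():
        print(name, sorted(files))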