| code (string, len 82–53.2k) | code_codestyle (int64, 0–721) | style_context (string, len 91–41.9k) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    """Dummy module whose forward signature lists all ONNX inputs contiguously."""

    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    """Dummy module whose forward signature interleaves an extra, unprovided argument."""

    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()

                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)

                return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with other args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly one arg (all before the one not provided, "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
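

if __name__ == "__main__":
    # Standalone usage sketch (not part of the test suite): export a checkpoint to
    # ONNX and quantize it with the same helpers exercised above. The model name,
    # opset and output path are illustrative assumptions, not values mandated by
    # the tests.
    onnx_path = Path("onnx/bert-base-cased.onnx")
    convert("pt", "bert-base-cased", onnx_path, 12, None)  # framework, model, output, opset, tokenizer
    quantized_path = quantize(onnx_path)
    print(f"quantized model written to {quantized_path}")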
| 99 |
from __future__ import annotations

arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]  # expected next-greater values for arr


def next_greatest_element_slow(arr: list[float]) -> list[float]:
    """Naive O(n^2) scan: for each element, look right for the first larger one."""
    result = []
    arr_size = len(arr)

    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    """Same O(n^2) idea, written with enumerate/slicing instead of explicit indices."""
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    """O(n) solution using a monotonic stack, scanning the array right to left."""
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size

    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result


if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    setup = (
        "from __main__ import arr, next_greatest_element_slow, "
        "next_greatest_element_fast, next_greatest_element"
    )
    print(
        "next_greatest_element_slow():",
        timeit("next_greatest_element_slow(arr)", setup=setup),
    )
    print(
        "next_greatest_element_fast():",
        timeit("next_greatest_element_fast(arr)", setup=setup),
    )
    print(
        "     next_greatest_element():",
        timeit("next_greatest_element(arr)", setup=setup),
    )
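

if __name__ == "__main__":
    # Quick sanity check (illustrative, not part of the original module): all three
    # implementations should agree on a small hand-checked input.
    _sample = [2, 7, 3, 5, 4, 6, 8]
    assert (
        next_greatest_element_slow(_sample)
        == next_greatest_element_fast(_sample)
        == next_greatest_element(_sample)
        == [7, 8, 5, 6, 6, 8, -1]
    )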
| 306 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
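
# Usage sketch from a consumer's point of view (assuming a torch install):
# importing the package is cheap, and torch-backed classes are only resolved on
# first attribute access thanks to `_LazyModule`:
#
#   from transformers.models.blip_2 import Blip2Config                    # lazy, no torch needed yet
#   from transformers.models.blip_2 import Blip2ForConditionalGeneration  # triggers the torch-gated import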
| 713 |
from __future__ import annotations

from typing import Any


def evaluate_postfix(postfix_notation: list) -> int:
    """
    Evaluate a postfix (reverse Polish) expression.

    >>> evaluate_postfix(["2", "1", "+", "3", "*"])
    9
    >>> evaluate_postfix(["4", "13", "5", "/", "+"])
    6
    >>> evaluate_postfix([])
    0
    """
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero (see note below)
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
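

if __name__ == "__main__":
    # Worked example of the sign-check branch (illustrative): Python's `//` floors,
    # while this evaluator truncates toward zero like most calculators, so
    # ["-7", "2", "/"] yields -3 rather than the floored -4.
    assert -7 // 2 == -4
    assert evaluate_postfix(["-7", "2", "/"]) == -3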
| 297 | 0 |
import random
import unittest

import torch

from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import (
    TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 5 |
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"""CLIPSegConfig""": True,
"""DeformableDetrConfig""": True,
"""DetaConfig""": True,
"""DinatConfig""": True,
"""DonutSwinConfig""": True,
"""EfficientFormerConfig""": True,
"""FSMTConfig""": True,
"""JukeboxConfig""": True,
"""LayoutLMv2Config""": True,
"""MaskFormerSwinConfig""": True,
"""MT5Config""": True,
"""NatConfig""": True,
"""OneFormerConfig""": True,
"""PerceiverConfig""": True,
"""RagConfig""": True,
"""SpeechT5Config""": True,
"""SwinConfig""": True,
"""Swin2SRConfig""": True,
"""Swinv2Config""": True,
"""SwitchTransformersConfig""": True,
"""TableTransformerConfig""": True,
"""TapasConfig""": True,
"""TransfoXLConfig""": True,
"""UniSpeechConfig""": True,
"""UniSpeechSatConfig""": True,
"""WavLMConfig""": True,
"""WhisperConfig""": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"""JukeboxPriorConfig""": True,
# TODO: @Younes (for `is_decoder`)
"""Pix2StructTextConfig""": True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    """Check if any name in `attributes` is used in one of the strings in `source_strings`."""
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
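

# Illustration of the multi-line case above (hypothetical snippet): a `getattr`
# call split across several lines still counts as a usage of the attribute.
#
#   snippet = 'getattr(\n    self.config,\n    "attention_types"\n)'
#   re.search(r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,'
#             r'[ \t\v\n\r\f]*"attention_types"', snippet)   # -> matches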
def check_config_attributes_being_used(config_class):
    """Return the list of `__init__` parameters of `config_class` not used in any modeling file."""
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    """Check that the arguments in `__init__` of all configuration classes are used in the modeling files."""
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 554 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
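

# Shape sketch for the split above (illustrative sizes): timm fuses the three
# attention projections into one (3 * hidden, hidden) matrix, and the row blocks
# [0:h], [h:2h] and [2h:3h] are the query, key and value weights respectively.
#
#   in_proj_weight = torch.randn(3 * 768, 768)
#   q = in_proj_weight[:768]          # (768, 768)
#   k = in_proj_weight[768:1536]      # (768, 768)
#   v = in_proj_weight[-768:]         # (768, 768)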
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """Copy/paste/tweak a timm ViT checkpoint into our ViT structure."""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1024
            config.intermediate_size = 4096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1280
            config.intermediate_size = 5120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--vit_name',
default='vit_base_patch16_224',
type=str,
help='Name of the ViT timm model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Compute the product u * (u - 1) * ... * (u - p + 1) used by Newton's forward formula."""
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp


def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])

    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0

    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))

    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())

    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])

    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]

    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)

    print(f"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
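
# The forward-difference table above implements Newton's forward interpolation:
#   f(x) \approx y_0 + u \Delta y_0 + \frac{u(u-1)}{2!} \Delta^2 y_0 + \cdots
#              + \frac{u(u-1)\cdots(u-n+1)}{n!} \Delta^n y_0,
#   where u = (x - x_0) / (x_1 - x_0).
# `ucal(u, i)` computes exactly the numerator product u(u-1)...(u-i+1) of term i.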
| 8 | 0 |
"""simple docstring"""
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    """Output class for the scheduler's `step` function output."""

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
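

# For the default cosine transform above, the schedule being discretized is
#   \bar{\alpha}(t) = \cos^2\left(\frac{t/T + 0.008}{1.008} \cdot \frac{\pi}{2}\right),
#   \beta_i = \min\left(1 - \frac{\bar{\alpha}((i+1)/T)}{\bar{\alpha}(i/T)}, \beta_{\max}\right),
# i.e. each beta is one minus the ratio of consecutive cumulative alpha products.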
class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    """The inverted scheduler of denoising diffusion implicit models (DDIM)."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No input scaling is needed for this scheduler; return the sample unchanged."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device=None):
        """Set the discrete timesteps used for the diffusion chain (to be run before inference)."""
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get the "previous" step value (=t+1 in the inverted direction)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
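

# The deterministic update in `step` is equation (12) of the DDIM paper run in
# the inverted direction (t -> t+1):
#   x_{t+1} = \sqrt{\bar{\alpha}_{t+1}} \hat{x}_0 + \sqrt{1 - \bar{\alpha}_{t+1}} \hat{\epsilon},
# with \hat{x}_0 and \hat{\epsilon} recovered from the model output as above.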
| 680 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Creates a set of `DataLoader`s for the `glue` dataset, using "bert-base-cased" as the tokenizer."""
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811


def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
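
# What `accelerator.accumulate(model)` achieves, in plain-PyTorch terms
# (conceptual sketch, not the real implementation): scale each micro-batch loss
# by 1/N and only step/zero the optimizer every N micro-batches, so gradients
# add up across N forward/backward passes before a single parameter update.
#
#   N = args.gradient_accumulation_steps
#   for i, batch in enumerate(train_dataloader):
#       loss = model(**batch).loss / N
#       loss.backward()                    # grads accumulate in .grad
#       if (i + 1) % N == 0:
#           optimizer.step()
#           optimizer.zero_grad()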
| 680 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / "foo.lock"))
    lock2 = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_filename(tmpdir):
    filename = "a" * 1_000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(".lock")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
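

# Minimal usage sketch of the lock under test (path is illustrative): only one
# holder of a given lock file exists at a time, and a second contender raises
# `Timeout` once the timeout elapses.
#
#   from datasets.utils.filelock import FileLock
#
#   with FileLock("/tmp/demo.lock", timeout=1):
#       ...  # critical section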
| 713 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of decoding method, holding the decoded output sample from the last layer of the model."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift="default", attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False
def _snake_case ( self: List[str] , a: List[Any] ):
__lowerCamelCase : List[str] = x
__lowerCamelCase : Dict = self.conv_in(a )
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: int ):
def custom_forward(*a: Optional[Any] ):
return module(*a )
return custom_forward
# down
if is_torch_version('>=' , '1.11.0' ):
for down_block in self.down_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , use_reentrant=a )
# middle
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , use_reentrant=a )
else:
for down_block in self.down_blocks:
__lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a )
# middle
__lowerCamelCase : int = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) , a )
else:
# down
for down_block in self.down_blocks:
__lowerCamelCase : List[Any] = down_block(a )
# middle
__lowerCamelCase : Union[str, Any] = self.mid_block(a )
# post-process
__lowerCamelCase : Tuple = self.conv_norm_out(a )
__lowerCamelCase : List[str] = self.conv_act(a )
__lowerCamelCase : int = self.conv_out(a )
return sample
class Decoder(nn.Module):
'''simple docstring'''
    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
super().__init__()
__lowerCamelCase : List[Any] = layers_per_block
        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)
__lowerCamelCase : Tuple = None
__lowerCamelCase : int = nn.ModuleList([] )
__lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None
# mid
        self.mid_block = UNetMidBlock2D(
in_channels=block_out_channels[-1] , resnet_eps=1e-6 , resnet_act_fn=a , output_scale_factor=1 , resnet_time_scale_shift='default' if norm_type == 'group' else norm_type , attention_head_dim=block_out_channels[-1] , resnet_groups=a , temb_channels=a , )
# up
__lowerCamelCase : Any = list(reversed(a ) )
__lowerCamelCase : Dict = reversed_block_out_channels[0]
for i, up_block_type in enumerate(a ):
__lowerCamelCase : List[Any] = output_channel
__lowerCamelCase : List[str] = reversed_block_out_channels[i]
__lowerCamelCase : Optional[Any] = i == len(a ) - 1
__lowerCamelCase : Optional[Any] = get_up_block(
a , num_layers=self.layers_per_block + 1 , in_channels=a , out_channels=a , prev_output_channel=a , add_upsample=not is_final_block , resnet_eps=1e-6 , resnet_act_fn=a , resnet_groups=a , attention_head_dim=a , temb_channels=a , resnet_time_scale_shift=a , )
self.up_blocks.append(a )
__lowerCamelCase : List[str] = output_channel
# out
if norm_type == "spatial":
__lowerCamelCase : int = SpatialNorm(block_out_channels[0] , a )
else:
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_channels=block_out_channels[0] , num_groups=a , eps=1e-6 )
__lowerCamelCase : Union[str, Any] = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)
__lowerCamelCase : List[str] = False
def _snake_case ( self: Optional[int] , a: Tuple , a: List[str]=None ):
__lowerCamelCase : List[str] = z
__lowerCamelCase : Union[str, Any] = self.conv_in(a )
__lowerCamelCase : List[str] = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(a: Any ):
def custom_forward(*a: str ):
return module(*a )
return custom_forward
if is_torch_version('>=' , '1.11.0' ):
# middle
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a , use_reentrant=a )
__lowerCamelCase : str = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
create_custom_forward(a ) , a , a , use_reentrant=a )
else:
# middle
__lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) , a , a )
__lowerCamelCase : int = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(create_custom_forward(a ) , a , a )
else:
# middle
__lowerCamelCase : int = self.mid_block(a , a )
__lowerCamelCase : List[str] = sample.to(a )
# up
for up_block in self.up_blocks:
__lowerCamelCase : List[str] = up_block(a , a )
# post-process
if latent_embeds is None:
__lowerCamelCase : Optional[int] = self.conv_norm_out(a )
else:
__lowerCamelCase : Dict = self.conv_norm_out(a , a )
__lowerCamelCase : Any = self.conv_act(a )
__lowerCamelCase : str = self.conv_out(a )
return sample
class VectorQuantizer(nn.Module):
'''simple docstring'''
    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
super().__init__()
__lowerCamelCase : Optional[Any] = n_e
__lowerCamelCase : Optional[int] = vq_embed_dim
__lowerCamelCase : Tuple = beta
__lowerCamelCase : List[str] = legacy
__lowerCamelCase : str = nn.Embedding(self.n_e , self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e , 1.0 / self.n_e )
__lowerCamelCase : str = remap
if self.remap is not None:
self.register_buffer('used' , torch.tensor(np.load(self.remap ) ) )
__lowerCamelCase : Dict = self.used.shape[0]
__lowerCamelCase : Optional[Any] = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
__lowerCamelCase : Any = self.re_embed
__lowerCamelCase : Optional[int] = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
__lowerCamelCase : int = n_e
__lowerCamelCase : Optional[Any] = sane_index_shape
def _snake_case ( self: Tuple , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : List[Any] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Any = self.used.to(a )
__lowerCamelCase : Union[str, Any] = (inds[:, :, None] == used[None, None, ...]).long()
__lowerCamelCase : Dict = match.argmax(-1 )
__lowerCamelCase : List[Any] = match.sum(2 ) < 1
if self.unknown_index == "random":
__lowerCamelCase : Tuple = torch.randint(0 , self.re_embed , size=new[unknown].shape ).to(device=new.device )
else:
__lowerCamelCase : str = self.unknown_index
return new.reshape(a )
def _snake_case ( self: Tuple , a: Optional[int] ):
__lowerCamelCase : List[Any] = inds.shape
assert len(a ) > 1
__lowerCamelCase : Optional[int] = inds.reshape(ishape[0] , -1 )
__lowerCamelCase : Union[str, Any] = self.used.to(a )
if self.re_embed > self.used.shape[0]: # extra token
__lowerCamelCase : Optional[Any] = 0 # simply set to zero
__lowerCamelCase : Optional[Any] = torch.gather(used[None, :][inds.shape[0] * [0], :] , 1 , a )
return back.reshape(a )
    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    def get_codebook_entry( self , indices: torch.LongTensor , shape ):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0] , -1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices )

        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 , 3 , 1 , 2 ).contiguous()

        return z_q
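# A minimal sketch of the nearest-codebook lookup plus straight-through estimator that
# `VectorQuantizer.forward` implements above (toy sizes; names are illustrative only):
def _vq_sketch():
    import torch

    codebook = torch.nn.Embedding(16, 4)  # n_e=16 codes of dimension 4
    z = torch.randn(10, 4, requires_grad=True)  # flattened latents
    # torch.cdist gives pairwise L2 distances; argmin picks the closest code per latent.
    indices = torch.argmin(torch.cdist(z, codebook.weight), dim=1)
    z_q = codebook(indices)
    # Straight-through: forward pass uses z_q, backward passes gradients through z unchanged.
    z_q = z + (z_q - z).detach()
    return z_q, indices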
class DiagonalGaussianDistribution(object ):
    """Gaussian posterior with diagonal covariance, parameterized by concatenated mean and log-variance channels."""
    def __init__( self , parameters: torch.Tensor , deterministic: bool = False ):
        self.parameters = parameters
        self.mean , self.logvar = torch.chunk(parameters , 2 , dim=1 )
        self.logvar = torch.clamp(self.logvar , -30.0 , 20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean , device=self.parameters.device , dtype=self.parameters.dtype )
    def sample( self , generator: Optional[torch.Generator] = None ) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape , generator=generator , device=self.parameters.device , dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x
    def kl( self , other=None ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean , 2 ) + self.var - 1.0 - self.logvar , dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean , 2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar , dim=[1, 2, 3] , )
    def nll( self , sample , dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean , 2 ) / self.var , dim=dims )
    def mode( self ):
        return self.mean
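# Sanity-check sketch: the closed-form `kl` above (other=None case) matches
# torch.distributions' KL against a standard normal (illustrative helper, not part of this file):
def _kl_sketch():
    import torch
    from torch.distributions import Normal, kl_divergence

    mean, logvar = torch.randn(2, 3), torch.randn(2, 3)
    closed_form = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=-1)
    reference = kl_divergence(Normal(mean, (0.5 * logvar).exp()), Normal(0.0, 1.0)).sum(dim=-1)
    assert torch.allclose(closed_form, reference, atol=1e-5)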
| 230 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor ):
    def __init__( self , *args , **kwargs ) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 614 |
import argparse
import json
import os

import torch

from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

    config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
    config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
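        # A minimal sketch of the prefix-renaming rule applied above (toy key; illustrative only):
        #   mapping = {"downsample_blocks": "down_blocks"}
        #   old_key = "downsample_blocks.0.conv.weight"
        #   prefix, rest = old_key.split(".")[0], old_key.split(".")[1:]
        #   new_key = ".".join([mapping.get(prefix, prefix)] + rest)
        #   # -> "down_blocks.0.conv.weight"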
| 242 | 0 |
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uinta
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
lowercase__ :List[Any] = imread(R'digital_image_processing/image_data/lena_small.jpg')
lowercase__ :int = cvtColor(img, COLOR_BGR2GRAY)
def lowerCamelCase_ ( ) ->Tuple:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = cn.convert_to_negative(UpperCAmelCase_ )
# assert negative_img array for at least one True
assert negative_img.any()
def lowerCamelCase_ ( ) ->Tuple:
"""simple docstring"""
with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCAmelCase_ , 1_10 ) ).startswith(
'''<PIL.Image.Image image mode=RGB size=100x100 at''' )
def lowerCamelCase_ ( ) ->int:
"""simple docstring"""
__UpperCAmelCase : Optional[int] = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
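# A small illustrative construction of a 2D Gaussian kernel like the one
# `canny.gen_gaussian_kernel(9, sigma=1.4)` is assumed to return (sketch only;
# the helper name is hypothetical and the kernel is normalized to sum to 1):
def _gaussian_kernel_sketch(k_size: int = 9, sigma: float = 1.4) -> np.ndarray:
    ax = np.arange(k_size) - (k_size - 1) / 2.0
    xx, yy = np.meshgrid(ax, ax)
    kernel = np.exp(-(xx**2 + yy**2) / (2.0 * sigma**2))
    return kernel / kernel.sum()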
def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]

    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)

    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 720 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
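# Example invocation (paths are hypothetical, shown for illustration only):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./lxmert/model.ckpt \
#     --config_file ./lxmert/config.json \
#     --pytorch_dump_path ./lxmert/pytorch_model.bin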
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 374 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
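    # A minimal sketch of the lazy-import pattern used here: attribute access on the module
    # triggers the real import on first use (simplified stand-in for _LazyModule; illustrative only):
    #
    #   import importlib, types
    #
    #   class LazyModuleSketch(types.ModuleType):
    #       def __init__(self, name, import_structure):
    #           super().__init__(name)
    #           self._import_structure = import_structure
    #       def __getattr__(self, attr):
    #           for submodule, names in self._import_structure.items():
    #               if attr in names:
    #                   mod = importlib.import_module(f"{self.__name__}.{submodule}")
    #                   return getattr(mod, attr)
    #           raise AttributeError(attr)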
| 486 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline ):
    _optional_components = ["vqvae"]
    def __init__( self , vqvae: AutoencoderKL , unet: UNet2DConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ) -> None:
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
        """Returns the default number of steps recommended for inference."""
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 1000
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype="uint8" ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNet2DConditionModel ):
                model_output = self.unet(images , t , encoding )["sample"]
            else:
                model_output = self.unet(images , t )["sample"]

            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling_factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )["sample"]

        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 255).round().astype("uint8" )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode="RGB" ).convert("L" ) for _ in images) )

        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
    def encode( self , images: List[Image.Image] , steps: int = 50 ) -> np.ndarray:
        """Reverse the denoising step process: recover the noisy image that generates an image."""
        # Only works with a deterministic (DDIM) reverse process
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype="uint8" ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample
@staticmethod
    def slerp( x0: torch.Tensor , x1: torch.Tensor , alpha: float ) -> torch.Tensor:
        """Spherical Linear intERPolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0 ) , torch.flatten(x1 ) ) / torch.norm(x0 ) / torch.norm(x1 ) )
        return sin((1 - alpha) * theta ) * x0 / sin(theta ) + sin(alpha * theta ) * x1 / sin(theta )
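# Quick illustrative check of `slerp` above: alpha=0 and alpha=1 recover the endpoints
# (toy shapes; the helper name is hypothetical and the check is a sketch, not a test suite):
def _slerp_sketch():
    import torch

    x0 = torch.randn(4)
    x1 = torch.randn(4)
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-5)
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 1.0), x1, atol=1e-5)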
| 486 | 1 |
from __future__ import annotations
def A ( __UpperCamelCase ) -> float:
if not nums:
raise ValueError('List is empty' )
return sum(__UpperCamelCase ) / len(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_yolos_config(yolos_name: str) -> YolosConfig:
    config = YolosConfig()

    # size of the architecture
    if "yolos_ti" in yolos_name:
        config.hidden_size = 192
        config.intermediate_size = 768
        config.num_hidden_layers = 12
        config.num_attention_heads = 3
        config.image_size = [800, 1333]
        config.use_mid_position_embeddings = False
    elif yolos_name == "yolos_s_dWr":
        config.hidden_size = 330
        config.num_hidden_layers = 14
        config.num_attention_heads = 6
        config.intermediate_size = 1320
    elif "yolos_s" in yolos_name:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    elif "yolos_b" in yolos_name:
        config.image_size = [800, 1344]

    config.num_labels = 91
    repo_id = "huggingface/label-files"
    filename = "coco-detection-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def read_in_q_k_v(state_dict: dict, config: YolosConfig, base_model: bool = False):
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[-config.hidden_size :, :]
        state_dict[f"vit.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
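# Shape sanity-check for the q/k/v split above: a fused qkv projection of size
# (3 * hidden, hidden) splits into three (hidden, hidden) blocks that concatenate back
# to the original (illustrative helper, not part of the conversion script):
def _qkv_split_sketch(hidden: int = 8):
    import torch

    in_proj_weight = torch.randn(3 * hidden, hidden)
    query = in_proj_weight[:hidden, :]
    key = in_proj_weight[hidden : hidden * 2, :]
    value = in_proj_weight[-hidden:, :]
    assert torch.equal(torch.cat([query, key, value], dim=0), in_proj_weight)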
def rename_key(name: str) -> str:
    if "backbone" in name:
        name = name.replace("backbone", "vit")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "det_token" in name:
        name = name.replace("det_token", "embeddings.detection_tokens")
    if "mid_pos_embed" in name:
        name = name.replace("mid_pos_embed", "encoder.mid_position_embeddings")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "class_embed" in name:
        name = name.replace("class_embed", "class_labels_classifier")
    if "bbox_embed" in name:
        name = name.replace("bbox_embed", "bbox_predictor")
    if "vit.norm" in name:
        name = name.replace("vit.norm", "vit.layernorm")

    return name
def convert_state_dict(orig_state_dict: dict, model: "YolosForObjectDetection") -> dict:
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            dim = model.vit.encoder.layer[layer_num].attention.attention.all_head_size
            if "weight" in key:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.weight"] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"vit.encoder.layer.{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
# We will verify our results on an image of cute cats
def prepare_img() -> "Image.Image":
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_yolos_checkpoint(yolos_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    config = get_yolos_config(yolos_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # load 🤗 model
    model = YolosForObjectDetection(config)
    model.eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by YolosImageProcessor
    size = 800 if yolos_name != "yolos_ti" else 512
    image_processor = YolosImageProcessor(format="coco_detection", size=size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits, pred_boxes = outputs.logits, outputs.pred_boxes

    expected_slice_logits, expected_slice_boxes = None, None
    if yolos_name == "yolos_ti":
        expected_slice_logits = torch.tensor(
            [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]]
        )
    elif yolos_name == "yolos_s_200_pre":
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]]
        )
    elif yolos_name == "yolos_s_300_pre":
        expected_slice_logits = torch.tensor(
            [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]]
        )
    elif yolos_name == "yolos_s_dWr":
        expected_slice_logits = torch.tensor(
            [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]]
        )
    elif yolos_name == "yolos_base":
        expected_slice_logits = torch.tensor(
            [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]]
        )
        expected_slice_boxes = torch.tensor(
            [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]]
        )
    else:
        raise ValueError(f"Unknown yolos_name: {yolos_name}")

    assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    assert torch.allclose(pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {yolos_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "yolos_ti": "yolos-tiny",
            "yolos_s_200_pre": "yolos-small",
            "yolos_s_300_pre": "yolos-small-300",
            "yolos_s_dWr": "yolos-small-dwr",
            "yolos_base": "yolos-base",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[yolos_name]
        image_processor.push_to_hub(model_name, organization="hustvl")
        model.push_to_hub(model_name, organization="hustvl")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--yolos_name''',
default='''yolos_s_200_pre''',
type=str,
help=(
'''Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\','''
''' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.'''
),
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original state dict (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 52 | 0 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ObjectDetectionPipeline(Pipeline ):
    """
    Object detection pipeline using any `AutoModelForObjectDetection`. This pipeline predicts bounding boxes of
    objects and their classes.
    """

    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch." )

        requires_backends(self , "vision" )
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )

    def _sanitize_parameters( self , **kwargs ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs["threshold"] = kwargs["threshold"]
        return {}, {}, postprocess_kwargs

    def __call__( self , *args , **kwargs ) -> Union[Predictions, List[Prediction]]:
        return super().__call__(*args , **kwargs )

    def preprocess( self , image ):
        image = load_image(image )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors="pt" )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs["words"] , boxes=inputs["boxes"] , return_tensors="pt" )
        inputs["target_size"] = target_size
        return inputs

    def _forward( self , model_inputs ):
        target_size = model_inputs.pop("target_size" )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({"target_size": target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs["bbox"] = model_inputs["bbox"]
        return model_outputs

    def postprocess( self , model_outputs , threshold=0.9 ):
        target_size = model_outputs["target_size"]
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox ):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ] ) )

            scores, classes = model_outputs["logits"].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs["bbox"].squeeze(0 )]
            keys = ["score", "label", "box"]
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation["scores"]
            labels = raw_annotation["labels"]
            boxes = raw_annotation["boxes"]

            raw_annotation["scores"] = scores.tolist()
            raw_annotation["labels"] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation["boxes"] = [self._get_bounding_box(box ) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ["score", "label", "box"]
            annotation = [
                dict(zip(keys , vals ) )
                for vals in zip(raw_annotation["scores"] , raw_annotation["labels"] , raw_annotation["boxes"] )
            ]

        return annotation

    def _get_bounding_box( self , box: "torch.Tensor" ) -> Dict[str, int]:
        """Turns a [xmin, ymin, xmax, ymax] tensor into a {"xmin": xmin, ...} dict."""
        if self.framework != "pt":
            raise ValueError("The ObjectDetectionPipeline is only available in PyTorch." )
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
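# Typical usage of this pipeline through the high-level API (model name is an example;
# running this requires network access to download the weights):
#
#   from transformers import pipeline
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   preds = detector("cats.png", threshold=0.9)
#   # -> [{"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]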
| 512 |
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextModelTester:
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )

        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        return ConvNextConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = ConvNextModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )

    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        model = ConvNextForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_backbone( self , config , pixel_values , labels ):
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )

        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ConvNextModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            ConvNextModel,
            ConvNextForImageClassification,
            ConvNextBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextConfig , has_text_modality=False , hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = ConvNextModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''') if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@require_torch
class ConvNextBackboneTest( unittest.TestCase , BackboneTesterMixin ):
    all_model_classes = (ConvNextBackbone,) if is_torch_available() else ()
    config_class = ConvNextConfig

    has_attentions = False

    def setUp( self ):
        self.model_tester = ConvNextModelTester(self )
| 512 | 1 |
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = "▁"

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class BertGenerationTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = BertGenerationTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()

        tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB , keep_accents=True )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self ):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<unk>" )
        self.assertEqual(vocab_keys[1] , "<s>" )
        self.assertEqual(vocab_keys[-1] , "<pad>" )
        self.assertEqual(len(vocab_keys ) , 1002 )

    def test_vocab_size( self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = BertGenerationTokenizer(_UpperCAmelCase, keep_accents=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(_UpperCAmelCase, ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ), [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2], )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4], )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
@cached_property
    def big_tokenizer( self ):
        return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def A_ ( self : str ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = "Hello World!"
SCREAMING_SNAKE_CASE__ : Any = [1_8_5_3_6, 2_2_6_0, 1_0_1]
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@slow
def A_ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
SCREAMING_SNAKE_CASE__ : Tuple = [
8_7_1,
4_1_9,
3_5_8,
9_4_6,
9_9_1,
2_5_2_1,
4_5_2,
3_5_8,
1_3_5_7,
3_8_7,
7_7_5_1,
3_5_3_6,
1_1_2,
9_8_5,
4_5_6,
1_2_6,
8_6_5,
9_3_8,
5_4_0_0,
5_7_3_4,
4_5_8,
1_3_6_8,
4_6_7,
7_8_6,
2_4_6_2,
5_2_4_6,
1_1_5_9,
6_3_3,
8_6_5,
4_5_1_9,
4_5_7,
5_8_2,
8_5_2,
2_5_5_7,
4_2_7,
9_1_6,
5_0_8,
4_0_5,
3_4_3_2_4,
4_9_7,
3_9_1,
4_0_8,
1_1_3_4_2,
1_2_4_4,
3_8_5,
1_0_0,
9_3_8,
9_8_5,
4_5_6,
5_7_4,
3_6_2,
1_2_5_9_7,
3_2_0_0,
3_1_2_9,
1_1_7_2,
]
self.assertListEqual(_UpperCAmelCase, self.big_tokenizer.encode(_UpperCAmelCase ) )
@require_torch
@slow
def A_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
SCREAMING_SNAKE_CASE__ : Optional[int] = list(self.big_tokenizer.get_vocab().keys() )[:1_0]
SCREAMING_SNAKE_CASE__ : str = " ".join(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.big_tokenizer.encode_plus(_UpperCAmelCase, return_tensors="pt", return_token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence], return_tensors="pt", return_token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = BertGenerationConfig()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = BertGenerationEncoder(_UpperCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**_UpperCAmelCase )
model(**_UpperCAmelCase )
@slow
def A_ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
# fmt: off
SCREAMING_SNAKE_CASE__ : Dict = {"input_ids": [[3_9_2_8_6, 4_5_8, 3_6_3_3_5, 2_0_0_1, 4_5_6, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 7_7_4_6, 1_7_4_1, 1_1_1_5_7, 3_9_1, 1_3_0_7_3, 1_3_2_6_6, 4_5_5, 1_1_3, 3_9_6_7, 3_5_4_1_2, 1_1_3, 4_9_3_6, 1_0_9, 3_8_7_0, 2_3_7_7, 1_1_3, 3_0_0_8_4, 4_5_7_2_0, 4_5_8, 1_3_4, 1_7_4_9_6, 1_1_2, 5_0_3, 1_1_6_7_2, 1_1_3, 1_1_8, 1_1_2, 5_6_6_5, 1_3_3_4_7, 3_8_6_8_7, 1_1_2, 1_4_9_6, 3_1_3_8_9, 1_1_2, 3_2_6_8, 4_7_2_6_4, 1_3_4, 9_6_2, 1_1_2, 1_6_3_7_7, 8_0_3_5, 2_3_1_3_0, 4_3_0, 1_2_1_6_9, 1_5_5_1_8, 2_8_5_9_2, 4_5_8, 1_4_6, 4_1_6_9_7, 1_0_9, 3_9_1, 1_2_1_6_9, 1_5_5_1_8, 1_6_6_8_9, 4_5_8, 1_4_6, 4_1_3_5_8, 1_0_9, 4_5_2, 7_2_6, 4_0_3_4, 1_1_1, 7_6_3, 3_5_4_1_2, 5_0_8_2, 3_8_8, 1_9_0_3, 1_1_1, 9_0_5_1, 3_9_1, 2_8_7_0, 4_8_9_1_8, 1_9_0_0, 1_1_2_3, 5_5_0, 9_9_8, 1_1_2, 9_5_8_6, 1_5_9_8_5, 4_5_5, 3_9_1, 4_1_0, 2_2_9_5_5, 3_7_6_3_6, 1_1_4], [4_4_8, 1_7_4_9_6, 4_1_9, 3_6_6_3, 3_8_5, 7_6_3, 1_1_3, 2_7_5_3_3, 2_8_7_0, 3_2_8_3, 1_3_0_4_3, 1_6_3_9, 2_4_7_1_3, 5_2_3, 6_5_6, 2_4_0_1_3, 1_8_5_5_0, 2_5_2_1, 5_1_7, 2_7_0_1_4, 2_1_2_4_4, 4_2_0, 1_2_1_2, 1_4_6_5, 3_9_1, 9_2_7, 4_8_3_3, 3_8_8, 5_7_8, 1_1_7_8_6, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_8_4, 2_1_6_9, 7_6_8_7, 2_1_9_3_2, 1_8_1_4_6, 7_2_6, 3_6_3, 1_7_0_3_2, 3_3_9_1, 1_1_4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase, model_name="google/bert_for_seq_generation_L-24_bbc_encoder", revision="c817d1fd1be2ffa69431227a1fe320544943d4db", )
| 157 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_CITATION = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
_DESCRIPTION = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
_KWARGS_DESCRIPTION = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Bleu(datasets.Metric):
    """Corpus-level BLEU, delegating to the reference TensorFlow NMT implementation."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
            codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/BLEU",
                "https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
            ],
        )

    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
        return {
            "bleu": bleu,
            "precisions": precisions,
            "brevity_penalty": bp,
            "length_ratio": ratio,
            "translation_length": translation_length,
            "reference_length": reference_length,
        }
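# Minimal usage sketch (added for illustration; not part of the original metric file).
# The token lists are made up; `smooth=True` applies the Lin & Och (2004) smoothing
# described in the docstring above, which mainly helps very short segments.
if __name__ == "__main__":
    bleu = datasets.load_metric("bleu")
    predictions = [["the", "cat", "sat", "on", "the", "mat"]]
    references = [[["the", "cat", "is", "on", "the", "mat"]]]
    print(bleu.compute(predictions=predictions, references=references))
    print(bleu.compute(predictions=predictions, references=references, smooth=True))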
| 157 | 1 |
def odd_even_sort(input_list: list) -> list:
    """Sort a list in place with odd-even (brick) sort and return it."""
    is_sorted = False
    while is_sorted is False:  # Until all the indices are traversed keep looping
        is_sorted = True
        for i in range(0, len(input_list) - 1, 2):  # iterating over all even indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
        for i in range(1, len(input_list) - 1, 2):  # iterating over all odd indices
            if input_list[i] > input_list[i + 1]:
                # swapping if elements not in order
                input_list[i], input_list[i + 1] = input_list[i + 1], input_list[i]
                is_sorted = False
    return input_list
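# Illustration (added): the even pass and the odd pass each compare disjoint index
# pairs, which is what makes odd-even sort easy to parallelize.
# >>> odd_even_sort([5, 3, 1, 4, 2])
# [1, 2, 3, 4, 5]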
if __name__ == "__main__":
    print("Enter list to be sorted")
    input_list = [int(x) for x in input().split()]  # inputting elements of the list in one line
    sorted_list = odd_even_sort(input_list)
    print("The sorted list is")
    print(sorted_list)
| 61 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
    import jax
    import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision
        )
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample

        assert sample.shape == latents.shape

        output_slice = jnp.asarray(jax.device_get(sample[-1, -2:, -2:, :2].flatten()), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)

        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 230 | 0 |
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place using gnome sort and return it."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
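# Illustration (added): the "gnome" steps back only after a swap, so already-sorted
# input costs just a single forward pass.
# >>> gnome_sort([3, 1, 2])
# [1, 2, 3]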
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 394 |
'''simple docstring'''
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    """Map one key from the original YOSO checkpoint to its Hugging Face equivalent."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename every key and add the buffers the Hugging Face model expects."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
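# Worked example (added for illustration; the key is hypothetical): rename_key maps a
# raw checkpoint key such as "model.transformer_0.mha.W_q.weight" step by step to
# "yoso.encoder.layer.0.attention.self.query.weight".
# Example invocation (script name and paths are placeholders):
#   python convert_yoso_checkpoint.py --pytorch_model_path ./yoso.ckpt \
#       --config_file ./config.json --pytorch_dump_path ./converted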
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 394 | 1 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    """Check a single pinned dependency on demand, with an optional hint for the error message."""
    require_version(deps[pkg], hint)
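# Usage sketch (added for illustration; "tqdm" is one key known to be in the deps table):
# dep_version_check("tqdm")  # raises if the installed tqdm violates its pinned version range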
| 376 |
"""simple docstring"""
from __future__ import annotations
from random import random
class Node:
    """Treap node: stores a value plus a random heap priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self):
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self):
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right
def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (keys <= value, keys > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every key in `left` is <= every key in `right`."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert `value` by splitting around it and merging a fresh node in between."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Remove all nodes holding `value`."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)
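# Illustration (added): erase(root, v) splits the treap into (keys < v), (keys == v)
# and (keys > v), then merges back only the outer two parts, dropping every node
# that holds v. For example, inorder(erase(insert(insert(None, 3), 3), 3)) prints nothing.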
def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted (in-order) order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Apply a sequence of +value / -value commands to the treap."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive treap session driven from stdin."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )
    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()
    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 46 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 255 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` identifies the template; the schemas describe the expected columns.
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
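# Usage sketch (added; the feature layout is illustrative only):
# feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# aligned = TextClassification().align_with_features(feats)
# aligned.column_mapping  -> {"text": "text", "labels": "labels"}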
| 255 | 1 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
snake_case : Optional[int] = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    """Round the requested image size up to dimensions compatible with the VQ latent grid."""
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
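# Worked example (added): with the default scale_factor=8 the image size is divided
# by 8**2 = 64, rounded up, then multiplied back by 8 to give the latent size, e.g.
#   get_new_h_w(768, 768) -> (96, 96)
#   get_new_h_w(700, 700) -> (88, 88)  # 700 // 64 == 10 with a remainder, so 11 * 8 == 88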
class KandinskyPipeline(DiffusionPipeline):
def __init__( self , _a , _a , _a , _a , _a , ):
super().__init__()
self.register_modules(
text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , movq=_a , )
__magic_name__ : List[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
if latents is None:
__magic_name__ : List[Any] = randn_tensor(_a , generator=_a , device=_a , dtype=_a )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
__magic_name__ : Optional[Any] = latents.to(_a )
__magic_name__ : Optional[Any] = latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a=None , ):
__magic_name__ : List[str] = len(_a ) if isinstance(_a , _a ) else 1
# get prompt text embeddings
__magic_name__ : str = self.tokenizer(
_a , padding="max_length" , truncation=_a , max_length=77 , return_attention_mask=_a , add_special_tokens=_a , return_tensors="pt" , )
__magic_name__ : Optional[Any] = text_inputs.input_ids
__magic_name__ : Union[str, Any] = self.tokenizer(_a , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_a , _a ):
__magic_name__ : Any = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__magic_name__ : Union[str, Any] = text_input_ids.to(_a )
__magic_name__ : str = text_inputs.attention_mask.to(_a )
__magic_name__ , __magic_name__ : Any = self.text_encoder(
input_ids=_a , attention_mask=_a )
__magic_name__ : List[Any] = prompt_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : Any = text_encoder_hidden_states.repeat_interleave(_a , dim=0 )
__magic_name__ : Optional[int] = text_mask.repeat_interleave(_a , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : List[str]
if negative_prompt is None:
__magic_name__ : Optional[int] = [""] * batch_size
elif type(_a ) is not type(_a ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(_a )} !='''
f''' {type(_a )}.''' )
elif isinstance(_a , _a ):
__magic_name__ : Tuple = [negative_prompt]
elif batch_size != len(_a ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(_a )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
" the batch size of `prompt`." )
else:
__magic_name__ : Union[str, Any] = negative_prompt
__magic_name__ : List[Any] = self.tokenizer(
_a , padding="max_length" , max_length=77 , truncation=_a , return_attention_mask=_a , add_special_tokens=_a , return_tensors="pt" , )
__magic_name__ : str = uncond_input.input_ids.to(_a )
__magic_name__ : Any = uncond_input.attention_mask.to(_a )
__magic_name__ , __magic_name__ : str = self.text_encoder(
input_ids=_a , attention_mask=_a )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__magic_name__ : Tuple = negative_prompt_embeds.shape[1]
__magic_name__ : Union[str, Any] = negative_prompt_embeds.repeat(1 , _a )
__magic_name__ : str = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _a )
__magic_name__ : Tuple = uncond_text_encoder_hidden_states.shape[1]
__magic_name__ : str = uncond_text_encoder_hidden_states.repeat(1 , _a , 1 )
__magic_name__ : int = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _a , -1 )
__magic_name__ : Optional[int] = uncond_text_mask.repeat_interleave(_a , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__magic_name__ : List[Any] = torch.cat([negative_prompt_embeds, prompt_embeds] )
__magic_name__ : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__magic_name__ : Optional[int] = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def SCREAMING_SNAKE_CASE ( self , _a=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
__magic_name__ : Tuple = torch.device(f'''cuda:{gpu_id}''' )
__magic_name__ : Union[str, Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
def SCREAMING_SNAKE_CASE ( self , _a=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
__magic_name__ : int = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_a )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__magic_name__ : List[str] = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__magic_name__ , __magic_name__ : Any = cpu_offload_with_hook(_a , _a , prev_module_hook=_a )
if self.safety_checker is not None:
__magic_name__ , __magic_name__ : Optional[int] = cpu_offload_with_hook(self.safety_checker , _a , prev_module_hook=_a )
# We'll offload the last model manually.
__magic_name__ : List[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_a )
def __call__( self , _a , _a , _a , _a = None , _a = 512 , _a = 512 , _a = 100 , _a = 4.0 , _a = 1 , _a = None , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , _a ):
__magic_name__ : Dict = 1
elif isinstance(_a , _a ):
__magic_name__ : Union[str, Any] = len(_a )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(_a )}''' )
__magic_name__ : Dict = self._execution_device
__magic_name__ : List[str] = batch_size * num_images_per_prompt
__magic_name__ : Tuple = guidance_scale > 1.0
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = self._encode_prompt(
_a , _a , _a , _a , _a )
if isinstance(_a , _a ):
__magic_name__ : Any = torch.cat(_a , dim=0 )
if isinstance(_a , _a ):
__magic_name__ : str = torch.cat(_a , dim=0 )
if do_classifier_free_guidance:
__magic_name__ : str = image_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : Tuple = negative_image_embeds.repeat_interleave(_a , dim=0 )
__magic_name__ : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_a )
self.scheduler.set_timesteps(_a , device=_a )
__magic_name__ : str = self.scheduler.timesteps
__magic_name__ : Union[str, Any] = self.unet.config.in_channels
__magic_name__ , __magic_name__ : str = get_new_h_w(_a , _a , self.movq_scale_factor )
# create initial latent
__magic_name__ : Optional[Any] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _a , _a , _a , self.scheduler , )
for i, t in enumerate(self.progress_bar(_a ) ):
# expand the latents if we are doing classifier free guidance
__magic_name__ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__magic_name__ : Any = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
__magic_name__ : Optional[Any] = self.unet(
sample=_a , timestep=_a , encoder_hidden_states=_a , added_cond_kwargs=_a , return_dict=_a , )[0]
if do_classifier_free_guidance:
__magic_name__ , __magic_name__ : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__magic_name__ , __magic_name__ : Optional[int] = noise_pred.chunk(2 )
__magic_name__ , __magic_name__ : int = variance_pred.chunk(2 )
__magic_name__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__magic_name__ : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__magic_name__ , __magic_name__ : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__magic_name__ : str = self.scheduler.step(
_a , _a , _a , generator=_a , ).prev_sample
# post-processing
__magic_name__ : Optional[int] = self.movq.decode(_a , force_not_quantize=_a )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
__magic_name__ : str = image * 0.5 + 0.5
__magic_name__ : Optional[int] = image.clamp(0 , 1 )
__magic_name__ : int = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__magic_name__ : Any = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 124 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
    from transformers.models.esm.modeling_esm import (
        ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
        EsmEmbeddings,
        create_position_ids_from_input_ids,
    )
class EsmModelTester:
def __init__( self , _a , _a=13 , _a=7 , _a=False , _a=True , _a=False , _a=True , _a=33 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__magic_name__ : Dict = parent
__magic_name__ : List[str] = batch_size
__magic_name__ : Tuple = seq_length
__magic_name__ : int = is_training
__magic_name__ : Union[str, Any] = use_input_mask
__magic_name__ : str = use_token_type_ids
__magic_name__ : Dict = use_labels
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Tuple = hidden_size
__magic_name__ : Union[str, Any] = num_hidden_layers
__magic_name__ : Union[str, Any] = num_attention_heads
__magic_name__ : str = intermediate_size
__magic_name__ : Tuple = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : int = attention_probs_dropout_prob
__magic_name__ : Tuple = max_position_embeddings
__magic_name__ : Optional[int] = type_vocab_size
__magic_name__ : Optional[int] = type_sequence_label_size
__magic_name__ : Dict = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Optional[Any] = scope
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Union[str, Any] = None
if self.use_input_mask:
__magic_name__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
__magic_name__ : List[Any] = None
__magic_name__ : List[str] = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self ):
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : Dict = EsmModel(config=_a )
model.to(_a )
model.eval()
__magic_name__ : str = model(_a , attention_mask=_a )
__magic_name__ : List[str] = model(_a )
__magic_name__ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = EsmForMaskedLM(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Optional[Any] = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self , _a , _a , _a , _a , _a , _a ):
__magic_name__ : int = self.num_labels
__magic_name__ : int = EsmForTokenClassification(config=_a )
model.to(_a )
model.eval()
__magic_name__ : Tuple = model(_a , attention_mask=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
UpperCamelCase__ = False
UpperCamelCase__ = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = ()
UpperCamelCase__ = (
{
'feature-extraction': EsmModel,
'fill-mask': EsmForMaskedLM,
'text-classification': EsmForSequenceClassification,
'token-classification': EsmForTokenClassification,
'zero-shot': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = EsmModelTester(self )
__magic_name__ : int = ConfigTester(self , config_class=_a , hidden_size=37 )
def SCREAMING_SNAKE_CASE ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ : str = type
self.model_tester.create_and_check_model(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def SCREAMING_SNAKE_CASE ( self ):
for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : Optional[Any] = EsmModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : List[str] = EsmEmbeddings(config=_a )
__magic_name__ : Dict = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
__magic_name__ : Tuple = torch.as_tensor(
[
[
0 + model.padding_idx + 1,
1 + model.padding_idx + 1,
2 + model.padding_idx + 1,
model.padding_idx,
]
] )
__magic_name__ : Dict = create_position_ids_from_input_ids(_a , model.padding_idx )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()[0]
__magic_name__ : str = EsmEmbeddings(config=_a )
__magic_name__ : Optional[Any] = torch.empty(2 , 4 , 30 )
__magic_name__ : str = [
0 + embeddings.padding_idx + 1,
1 + embeddings.padding_idx + 1,
2 + embeddings.padding_idx + 1,
3 + embeddings.padding_idx + 1,
]
__magic_name__ : Tuple = torch.as_tensor([expected_single_positions, expected_single_positions] )
__magic_name__ : List[str] = embeddings.create_position_ids_from_inputs_embeds(_a )
self.assertEqual(position_ids.shape , expected_positions.shape )
self.assertTrue(torch.all(torch.eq(_a , _a ) ) )
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Esm does not support embedding resizing" )
def SCREAMING_SNAKE_CASE ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def SCREAMING_SNAKE_CASE ( self ):
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Dict = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Dict = model(_a )[0]
__magic_name__ : Optional[Any] = 33
__magic_name__ : Optional[int] = torch.Size((1, 6, vocab_size) )
self.assertEqual(output.shape , _a )
__magic_name__ : List[Any] = torch.tensor(
[[[8.92_15, -10.58_98, -6.46_71], [-6.39_67, -13.91_14, -1.12_12], [-7.78_12, -13.95_16, -3.74_06]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
@slow
def SCREAMING_SNAKE_CASE ( self ):
with torch.no_grad():
__magic_name__ : Optional[int] = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" )
model.eval()
__magic_name__ : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
__magic_name__ : Tuple = model(_a )[0]
# compare the actual values for a slice.
__magic_name__ : Optional[Any] = torch.tensor(
[[[0.14_44, 0.54_13, 0.32_48], [0.30_34, 0.00_53, 0.31_08], [0.32_28, -0.24_99, 0.34_15]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1e-4 ) )
| 124 | 1 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image batch in [-1, 1] to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image or a batch of images to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]
    return pil_images
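# Minimal usage sketch (added; shapes and values are illustrative only):
if __name__ == "__main__":
    import numpy as np

    batch = np.random.rand(2, 64, 64, 3)  # two RGB images with values in [0, 1]
    pil_images = numpy_to_pil(batch)
    print(len(pil_images), pil_images[0].size)  # 2 (64, 64)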
| 127 |
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
"""simple docstring"""
def __init__( self : Optional[int] , __a : Optional[Any] , __a : Tuple=1_3 , __a : Dict=7 , __a : List[Any]=True , __a : int=True , __a : List[Any]=9_9 , __a : Any=3_2 , __a : Optional[Any]=5 , __a : Optional[int]=4 , __a : str=3_7 , __a : Optional[int]="gelu" , __a : Dict=0.1 , __a : List[Any]=0.1 , __a : List[Any]=5_0 , __a : List[str]=0.02 , __a : Union[str, Any]=True , __a : Tuple=None , ):
snake_case__ : List[Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : Optional[int] = is_training
snake_case__ : str = use_input_mask
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : List[str] = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : List[str] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : List[str] = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Any = use_labels
snake_case__ : Any = scope
def lowercase ( self : Optional[Any] ):
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = None
if self.use_input_mask:
snake_case__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def lowercase ( self : Optional[Any] ):
return BertGenerationConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , is_decoder=__a , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def lowercase ( self : str , __a : List[str] , __a : Union[str, Any] , __a : Any , __a : Optional[Any] , **__a : Optional[Any] , ):
snake_case__ : Tuple = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
snake_case__ : int = model(__a , attention_mask=__a )
snake_case__ : Optional[int] = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Tuple , __a : int , __a : int , __a : Union[str, Any] , __a : Optional[int] , __a : Any , __a : int , **__a : Dict , ):
snake_case__ : List[str] = True
snake_case__ : List[str] = BertGenerationEncoder(config=__a )
model.to(__a )
model.eval()
snake_case__ : Tuple = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , )
snake_case__ : Tuple = model(
__a , attention_mask=__a , encoder_hidden_states=__a , )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self : Tuple , __a : Dict , __a : List[str] , __a : Tuple , __a : Union[str, Any] , __a : int , __a : int , **__a : Tuple , ):
snake_case__ : Union[str, Any] = True
snake_case__ : Dict = True
snake_case__ : Optional[int] = BertGenerationDecoder(config=__a ).to(__a ).eval()
# first forward pass
snake_case__ : Optional[int] = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , use_cache=__a , )
snake_case__ : Optional[int] = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : int = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
snake_case__ : int = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Optional[Any] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Tuple = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , output_hidden_states=__a , )["""hidden_states"""][0]
snake_case__ : Dict = model(
__a , attention_mask=__a , encoder_hidden_states=__a , encoder_attention_mask=__a , past_key_values=__a , output_hidden_states=__a , )["""hidden_states"""][0]
# select random slice
snake_case__ : Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Optional[int] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__a , __a , atol=1e-3 ) )
def lowercase ( self : Any , __a : int , __a : List[Any] , __a : int , __a : List[Any] , *__a : List[str] , ):
snake_case__ : Tuple = BertGenerationDecoder(__a )
model.to(__a )
model.eval()
snake_case__ : Any = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Dict ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self.prepare_config_and_inputs()
snake_case__ : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
__UpperCamelCase : int = (BertGenerationDecoder,) if is_torch_available() else ()
__UpperCamelCase : Dict = (
{'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder}
if is_torch_available()
else {}
)
def lowercase ( self : str ):
snake_case__ : int = BertGenerationEncoderTester(self )
snake_case__ : int = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def lowercase ( self : Tuple ):
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def lowercase ( self : Dict ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : List[Any] = """bert"""
self.model_tester.create_and_check_model(__a , __a , __a , __a )
def lowercase ( self : Tuple ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__a )
    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
def lowercase ( self : List[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*__a )
@slow
def lowercase ( self : Optional[int] ):
snake_case__ : Dict = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
self.assertIsNotNone(__a )
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = BertGenerationEncoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
snake_case__ : Union[str, Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
snake_case__ : Tuple = model(__a )[0]
snake_case__ : Optional[Any] = torch.Size([1, 8, 1_0_2_4] )
self.assertEqual(output.shape , __a )
snake_case__ : List[str] = torch.tensor(
[[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
@require_torch
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase ( self : Tuple ):
snake_case__ : List[Any] = BertGenerationDecoder.from_pretrained("""google/bert_for_seq_generation_L-24_bbc_encoder""" )
snake_case__ : Optional[Any] = torch.tensor([[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_2_6, 3_8_9_9, 2_0_0_3, 1_0_1_4_0, 1_0_2]] )
with torch.no_grad():
snake_case__ : List[Any] = model(__a )[0]
snake_case__ : Tuple = torch.Size([1, 8, 5_0_3_5_8] )
self.assertEqual(output.shape , __a )
snake_case__ : List[str] = torch.tensor(
[[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __a , atol=1e-4 ) )
| 127 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
    import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that returns dataset rows, columns and batches as torch tensors."""

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
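# Note (added): in practice this formatter is selected via `Dataset.set_format("torch")`;
# per `_tensorize` above, integer columns default to torch.int64 and float columns to
# torch.float32 unless overridden through `torch_tensor_kwargs`.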
| 94 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
_UpperCamelCase = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
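# Migration sketch (added; the checkpoint id is illustrative):
# from transformers import MobileViTImageProcessor
# image_processor = MobileViTImageProcessor.from_pretrained("apple/mobilevit-small")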
| 341 | 0 |
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    """A node covering the index range [start, end] with the aggregated value `val`."""

    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    """Segment tree over `collection`, aggregating ranges with the binary `function`."""

    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
A : Tuple = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
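# The tree works with any associative binary function, not just the add/max/min used in the
# demo above. A small sketch with math.gcd (my example, not part of the original module):
import math

gcd_tree = SegmentTree([12, 18, 24, 30], math.gcd)
print(gcd_tree.query_range(0, 3))  # 6
gcd_tree.update(1, 7)
print(gcd_tree.query_range(0, 3))  # 1, since gcd(12, 7, 24, 30) == 1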
| 356 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
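# The `_LazyModule` replacement above defers the heavy submodule imports until an attribute
# is first accessed. A toy sketch of the mechanism (simplified; the real Transformers class
# also handles dir(), pickling, and nested structures):
import importlib
import types


class LazyModule(types.ModuleType):
    """Resolve `module.Thing` to the submodule that defines it, on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._attr_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(f"{self.__name__}.{submodule}"), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value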
| 356 | 1 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download a GitHub Actions artifact; the download URL is behind a redirect."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)


def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a single downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None

    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line

    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )

    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)

    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]

    return result


def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors


def reduce_by_error(logs, error_filter=None):
    """Count identical errors across all logs."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        test = test.split("/")[2]
    else:
        test = None
    return test


def reduce_by_model(logs, error_filter=None):
    """Group errors by the model whose tests produced them."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}

    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}

    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r


def make_github_table(reduced_by_error):
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)


def make_github_table_per_model(reduced_by_model):
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)

    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])

    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)

    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)

    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)

    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
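# A sketch of how the script above is typically invoked (the file name and run id below are
# my assumptions, not part of the original):
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ci_reports \
#       --token "$GITHUB_TOKEN"
#
# It writes job_links.json, artifacts.json, errors.json and two GitHub-flavoured tables
# (reduced_by_error.txt, reduced_by_model.txt) into --output_dir.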
| 63 |
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Optional[int] = ["pixel_values"]
def __init__( self ,A__ = True ,A__ = None ,A__ = PILImageResampling.BILINEAR ,A__ = True ,A__ = None ,A__ = True ,A__ = 1 / 255 ,A__ = True ,A__ = True ,A__ = None ,A__ = None ,**A__ ,):
super().__init__(**A__ )
_A : Tuple = size if size is not None else {'''shortest_edge''': 256}
_A : str = get_size_dict(A__ ,default_to_square=A__ )
_A : str = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
_A : str = get_size_dict(A__ ,param_name='''crop_size''' )
_A : Tuple = do_resize
_A : Optional[Any] = size
_A : Optional[Any] = do_center_crop
_A : List[str] = crop_size
_A : Dict = resample
_A : Tuple = do_rescale
_A : int = rescale_factor
_A : List[str] = offset
_A : List[Any] = do_normalize
_A : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_A : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self ,A__ ,A__ ,A__ = PILImageResampling.BILINEAR ,A__ = None ,**A__ ,):
_A : str = get_size_dict(A__ ,default_to_square=A__ )
if "shortest_edge" in size:
_A : Optional[Any] = get_resize_output_image_size(A__ ,size['''shortest_edge'''] ,default_to_square=A__ )
elif "height" in size and "width" in size:
_A : str = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(A__ ,size=A__ ,resample=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ = None ,**A__ ,):
_A : Optional[int] = get_size_dict(A__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(A__ ,size=(size['''height'''], size['''width''']) ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ = True ,A__ = None ,**A__ ,):
_A : Any = image.astype(np.floataa )
if offset:
_A : List[str] = image - (scale / 2)
return rescale(A__ ,scale=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ ,A__ ,A__ = None ,**A__ ,):
return normalize(A__ ,mean=A__ ,std=A__ ,data_format=A__ ,**A__ )
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,):
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
if offset and not do_rescale:
raise ValueError('''For offset, do_rescale must also be set to True.''' )
# All transformations expect numpy arrays.
_A : Dict = to_numpy_array(A__ )
if do_resize:
_A : Any = self.resize(image=A__ ,size=A__ ,resample=A__ )
if do_center_crop:
_A : str = self.center_crop(A__ ,size=A__ )
if do_rescale:
_A : Optional[int] = self.rescale(image=A__ ,scale=A__ ,offset=A__ )
if do_normalize:
_A : Dict = self.normalize(image=A__ ,mean=A__ ,std=A__ )
_A : Union[str, Any] = to_channel_dimension_format(A__ ,A__ )
return image
def A__ ( self ,A__ ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = None ,A__ = ChannelDimension.FIRST ,**A__ ,):
_A : Optional[int] = do_resize if do_resize is not None else self.do_resize
_A : List[str] = resample if resample is not None else self.resample
_A : Union[str, Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
_A : Tuple = do_rescale if do_rescale is not None else self.do_rescale
_A : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_A : List[str] = offset if offset is not None else self.offset
_A : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
_A : Optional[Any] = image_mean if image_mean is not None else self.image_mean
_A : Dict = image_std if image_std is not None else self.image_std
_A : str = size if size is not None else self.size
_A : int = get_size_dict(A__ ,default_to_square=A__ )
_A : Any = crop_size if crop_size is not None else self.crop_size
_A : Optional[int] = get_size_dict(A__ ,param_name='''crop_size''' )
if not valid_images(A__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
_A : List[str] = make_batched(A__ )
_A : Tuple = [
[
self._preprocess_image(
image=A__ ,do_resize=A__ ,size=A__ ,resample=A__ ,do_center_crop=A__ ,crop_size=A__ ,do_rescale=A__ ,rescale_factor=A__ ,offset=A__ ,do_normalize=A__ ,image_mean=A__ ,image_std=A__ ,data_format=A__ ,)
for img in video
]
for video in videos
]
_A : Optional[int] = {'''pixel_values''': videos}
return BatchFeature(data=A__ ,tensor_type=A__ )
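# A minimal usage sketch for the processor above. The class name was mangled in this dump;
# in Transformers this exact implementation (including the `offset` rescale flag) ships as
# VivitImageProcessor, so that name is assumed below.
import numpy as np

processor = VivitImageProcessor()  # assumed name for the class defined above
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(8)]  # 8 RGB frames
inputs = processor.preprocess(video, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, channels, height, width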
| 206 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: denoises mel spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        # DDIM converges in far fewer steps than DDPM
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(
                    input_images, noise, self.scheduler.timesteps[start_step - 1]
                )

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(
                input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:])
            )

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse the sampling process to recover the noise that generates the given images."""
        # Only works with DDIM, which is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation: alpha == 0 returns x0, alpha == 1 returns x1."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
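# `slerp` above interpolates along the great circle between two noise tensors, which keeps
# intermediate points at a norm the diffusion model expects (a straight lerp would cut
# through lower-norm territory). A small usage sketch; the shapes are my own choice:
import torch

x0 = torch.randn(1, 1, 256, 256)
x1 = torch.randn(1, 1, 256, 256)

# Ten evenly spaced points on the spherical path; each could seed the pipeline's `noise` argument.
path = [AudioDiffusionPipeline.slerp(x0, x1, float(alpha)) for alpha in torch.linspace(0, 1, 10)]
print(path[0].shape)  # torch.Size([1, 1, 256, 256])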
| 447 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w", "e r</w>", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    def test_full_tokenizer(self):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests: the tokenizer has no padding token, so padding must raise.
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    def test_padding_different_model(self):
        # tokenizer has no padding token
        pass


@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy(OpenAIGPTTokenizationTest):
    """Reruns the tests above with `ftfy` and `spacy` installed, which changes the pre-tokenization."""

    pass
| 61 |
import os

# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]


def solution():
    """
    Returns the number of triangular words in words.txt (Project Euler problem 42).
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")

    words = ""
    with open(words_file_path) as f:
        words = f.readline()

    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)


if __name__ == "__main__":
    print(solution())
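# A worked example of the scoring above: ord(letter) - 64 maps "A" -> 1 ... "Z" -> 26, so
# "SKY" scores 19 + 11 + 25 = 55, which is the 10th triangular number (0.5 * 10 * 11):
word = "SKY"
value = sum(ord(letter) - 64 for letter in word)
print(value, value in TRIANGULAR_NUMBERS)  # 55 True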
| 61 | 1 |
def gray_code(bit_count: int) -> list:
    """
    Takes in an integer n and returns an n-bit Gray code sequence.

    >>> gray_code(2)
    [0, 1, 3, 2]
    """
    # bit count represents no. of bits in the gray code
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    # The approach is a recursive one
    # Base case achieved when either n = 0 or n = 1
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
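# Consecutive entries of a Gray code differ in exactly one bit, which the XOR check below
# verifies (the example is mine, not part of the original module):
codes = gray_code(3)
print(codes)  # [0, 1, 3, 2, 6, 7, 5, 4]
for a, b in zip(codes, codes[1:]):
    assert bin(a ^ b).count("1") == 1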
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class _a :
"""simple docstring"""
def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=True , A__=True , A__=True , A__=99 , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=5_12 , A__=16 , A__=2 , A__=0.02 , A__=3 , A__=4 , A__=None , ) -> int:
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = 13
_SCREAMING_SNAKE_CASE = 7
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = 99
_SCREAMING_SNAKE_CASE = 32
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = 37
_SCREAMING_SNAKE_CASE = """gelu"""
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 0.1
_SCREAMING_SNAKE_CASE = 5_12
_SCREAMING_SNAKE_CASE = 16
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 0.02
_SCREAMING_SNAKE_CASE = 3
_SCREAMING_SNAKE_CASE = 4
_SCREAMING_SNAKE_CASE = None
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A__ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModel(config=A__ )
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
_SCREAMING_SNAKE_CASE = [input_ids, input_mask]
_SCREAMING_SNAKE_CASE = model(A__ )
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> str:
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )["""logits"""]
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Dict:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Any:
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=A__ )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(A__ , 1 ) , (1, self.num_choices, 1) )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ ) -> Tuple:
_SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=A__ )
_SCREAMING_SNAKE_CASE = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""token_type_ids""": token_type_ids,
}
_SCREAMING_SNAKE_CASE = model(A__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class _a (_lowerCamelCase , _lowerCamelCase , unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> str:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=A__ , hidden_size=37 )
def UpperCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A__ )
def UpperCamelCase ( self ) -> int:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A__ )
def UpperCamelCase ( self ) -> Dict:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A__ )
def UpperCamelCase ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A__ )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A__ )
@slow
def UpperCamelCase ( self ) -> str:
_SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(A__ )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
_SCREAMING_SNAKE_CASE = model(A__ )[0]
# TODO Replace vocab size
_SCREAMING_SNAKE_CASE = 5_00_00
_SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape , A__ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
_SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A__ , atol=1E-4 )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
_SCREAMING_SNAKE_CASE = emba(input_ids.shape )
_SCREAMING_SNAKE_CASE = tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
def UpperCamelCase ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
emba([2, 16, 5_12] )
_SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(A__ , A__ , atol=self.tolerance )
@require_tf
class _a (unittest.TestCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 1E-4
def UpperCamelCase ( self ) -> int:
# 2,12,16,64
_SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 1_00
_SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
_SCREAMING_SNAKE_CASE = embed_positions([2, 16, 7_68] )[None, None, :, :]
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A__ , A__ , A__ )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A__ , atol=self.tolerance )
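# The last test above exercises RoFormer's rotary position embeddings: query/key channel
# pairs are rotated by position-dependent angles so attention scores depend only on relative
# positions. A minimal NumPy sketch of one common formulation (the exact channel pairing in
# TFRoFormerSelfAttention may differ in detail):
import numpy as np


def apply_rotary(x, positions, dim):
    inv_freq = 1.0 / (10000 ** (np.arange(0, dim, 2) / dim))  # one frequency per channel pair
    angles = np.outer(positions, inv_freq)                    # (seq_len, dim // 2)
    sin, cos = np.sin(angles), np.cos(angles)
    x1, x2 = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x1 * cos - x2 * sin                      # rotate each (x1, x2) pair
    out[..., 1::2] = x1 * sin + x2 * cos
    return out


q = np.random.randn(6, 8)  # (seq_len, head_dim)
q_rot = apply_rotary(q, np.arange(6), 8)
print(q_rot.shape)         # (6, 8); per-pair norms are preserved by the rotation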
| 0 | 1 |
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 539 |
from __future__ import annotations


def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    """
    Sorts `sequence[start..end]` (both inclusive) in place.

    >>> seq = [1, 6, 2, 5, 3, 4]
    >>> slowsort(seq)
    >>> seq
    [1, 2, 3, 4, 5, 6]
    """
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
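# Slowsort is a deliberately pessimal "multiply and surrender" algorithm; its running time
# T(n) = 2 T(n/2) + T(n - 1) + O(1) grows faster than any polynomial, so keep inputs tiny:
data = [5, 3, 8, 1, 4]
slowsort(data)
print(data)  # [1, 3, 4, 5, 8]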
| 101 | 0 |
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()

    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")

    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            if "weight" in key:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original checkpoint's weights into our AST structure."""
    config = get_audio_spectrogram_transformer_config(model_name)

    model_name_to_url = {
        "ast-finetuned-audioset-10-10-0.4593": (
            "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.450": (
            "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448": (
            "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
        ),
        "ast-finetuned-audioset-10-10-0.448-v2": (
            "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
        ),
        "ast-finetuned-audioset-12-12-0.447": (
            "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
        ),
        "ast-finetuned-audioset-14-14-0.443": (
            "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
        ),
        "ast-finetuned-audioset-16-16-0.442": (
            "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
        ),
        "ast-finetuned-speech-commands-v2": (
            "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
        ),
    }

    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)

    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()

    model.load_state_dict(new_state_dict)

    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)

    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint",
            filename="sample_audio.flac",
            repo_type="dataset",
        )

        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()

    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")

    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits

    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")

    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
if __name__ == "__main__":
__snake_case :int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__snake_case :Optional[int] =parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 224 |
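
# ---------------------------------------------------------------------------
# Usage sketch (not part of the conversion script above). Assuming the script
# has been run with --pytorch_dump_folder_path ./ast-dump, the converted
# checkpoint can be reloaded for inference. The dump path and the dummy 16 kHz
# waveform are illustrative assumptions, not values taken from the script.
# ---------------------------------------------------------------------------
def _example_ast_inference():
    import numpy as np
    import torch

    from transformers import ASTFeatureExtractor, ASTForAudioClassification

    model = ASTForAudioClassification.from_pretrained("./ast-dump")  # hypothetical dump folder
    feature_extractor = ASTFeatureExtractor.from_pretrained("./ast-dump")
    waveform = np.zeros(16000, dtype=np.float32)  # 1 second of silence at 16 kHz
    inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])
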
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the values below are hardcoded (as in the original source) rather
        # than taken from the arguments; hidden_size is deliberately 384.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
A = TFConvBertModel(config=__UpperCamelCase )
A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A = [input_ids, input_mask]
A = model(__UpperCamelCase )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] ) -> str:
A = TFConvBertForMaskedLM(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> Tuple:
A = self.num_labels
A = TFConvBertForSequenceClassification(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) -> Tuple:
A = self.num_choices
A = TFConvBertForMultipleChoice(config=__UpperCamelCase )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Any:
A = self.num_labels
A = TFConvBertForTokenClassification(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ) -> str:
A = TFConvBertForQuestionAnswering(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
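
# A standalone sketch of the pattern this tester encapsulates: build a small
# ConvBertConfig, run one forward pass, and check the output shape. The config
# values mirror the tester defaults above; this is illustrative, not a test.
def _example_tiny_convbert_forward():
    config = ConvBertConfig(
        vocab_size=99,
        hidden_size=384,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        max_position_embeddings=512,
    )
    model = TFConvBertModel(config)
    input_ids = tf.constant([[1, 2, 3, 4, 5, 6, 7]])  # batch of 1, seq_length 7
    output = model(input_ids).last_hidden_state
    assert output.shape == (1, 7, config.hidden_size)
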
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Dict ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __UpperCamelCase ( self : str ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = True
if hasattr(__UpperCamelCase , 'use_cache' ):
A = True
A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
for model_class in self.all_model_classes:
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
A = model_class(__UpperCamelCase )
A = len(model(__UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase , saved_model=__UpperCamelCase )
A = os.path.join(__UpperCamelCase , 'saved_model' , '1' )
A = tf.keras.models.load_model(__UpperCamelCase )
A = model(__UpperCamelCase )
if self.is_encoder_decoder:
A = outputs['encoder_hidden_states']
A = outputs['encoder_attentions']
else:
A = outputs['hidden_states']
A = outputs['attentions']
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
A = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self : str ) -> str:
A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
def check_decoder_attentions_output(__UpperCamelCase : List[Any] ):
A = len(__UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
A = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__UpperCamelCase : Dict ):
A = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A = True
A = False
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
A = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A = True
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
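
# Note on the num_attention_heads / 2 assertions above: ConvBERT splits its
# heads between self-attention and span-based dynamic convolution, so with the
# default head_ratio of 2 only half of num_attention_heads appear in the
# returned attention tensors. A hedged sketch of checking this directly:
def _example_convbert_attention_head_count():
    config = ConvBertConfig(
        vocab_size=99, hidden_size=384, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
    )
    model = TFConvBertModel(config)
    outputs = model(tf.constant([[1, 2, 3, 4, 5]]), output_attentions=True)
    # attention shape: (batch, num_attention_heads // head_ratio, seq_len, seq_len)
    assert outputs.attentions[0].shape[1] == config.num_attention_heads // config.head_ratio
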
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # sample gaussian noise to begin the denoising loop
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,), "This is a local test"
        return ImagePipelineOutput(images=image), "This is a local test"
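
# Usage sketch for the pipeline above. The tiny UNet2DModel configuration and
# the DDPMScheduler choice are illustrative assumptions; any unet/scheduler
# pair compatible with register_modules would behave the same way.
def _example_custom_local_pipeline():
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        layers_per_block=1,
        block_out_channels=(32, 64),
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DDPMScheduler(num_train_timesteps=1000)
    pipe = CustomLocalPipeline(unet=unet, scheduler=scheduler)
    images, message = pipe(batch_size=1, num_inference_steps=2)
    assert message == "This is a local test"
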
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]])
        labels = None
        if self.use_labels:
            # labels is a list of Dict (each Dict being the labels for a given example in the batch)
            labels = []
            for i in range(self.batch_size):
                target = {}
                target["class_labels"] = torch.randint(
                    high=self.num_labels, size=(self.n_targets,), device=torch_device
                )
                target["boxes"] = torch.rand(self.n_targets, 4, device=torch_device)
                labels.append(target)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return YolosConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            num_detection_tokens=self.num_detection_tokens,
            num_labels=self.num_labels,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = YolosModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.expected_seq_len, self.hidden_size)
        )

    def create_and_check_for_object_detection(self, config, pixel_values, labels):
        model = YolosForObjectDetection(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values=pixel_values)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
        result = model(pixel_values=pixel_values, labels=labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1))
        self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
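
# The expected_seq_len bookkeeping above is worth spelling out: a 30x30 image
# with 2x2 patches yields 15 * 15 = 225 patches, plus 1 [CLS] token, plus 10
# detection tokens, i.e. 236 positions in total. A quick sanity check:
def _example_yolos_sequence_length():
    image_size, patch_size, num_detection_tokens = (30, 30), 2, 10
    num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
    assert num_patches + 1 + num_detection_tokens == 236
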
@require_torch
class SCREAMING_SNAKE_CASE (a__ , a__ , unittest.TestCase ):
lowerCAmelCase = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
lowerCAmelCase = (
{'''feature-extraction''': YolosModel, '''object-detection''': YolosForObjectDetection} if is_torch_available() else {}
)
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
lowerCAmelCase = False
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase)
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
__A : Any = []
for i in range(self.model_tester.batch_size):
__A : Tuple = {}
__A : Tuple = torch.ones(
size=(self.model_tester.n_targets,) , device=_UpperCAmelCase , dtype=torch.long)
__A : Optional[Any] = torch.ones(
self.model_tester.n_targets , 4 , device=_UpperCAmelCase , dtype=torch.float)
labels.append(_UpperCAmelCase)
__A : str = labels
return inputs_dict
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = YolosModelTester(self)
__A : Dict = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : Tuple = model_class(_UpperCAmelCase)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
__A : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear))
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[Any] = model_class(_UpperCAmelCase)
__A : str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__A : int = [*signature.parameters.keys()]
__A : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A ,__A : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__A : Optional[int] = True
# in YOLOS, the seq_len is different
__A : Dict = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
__A : Dict = True
__A : Dict = False
__A : Union[str, Any] = True
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : Any = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__A : List[Any] = True
__A : List[str] = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
__A : str = len(_UpperCAmelCase)
# Check attention is always last and order is fine
__A : Dict = True
__A : Dict = True
__A : Dict = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Union[str, Any] = 1
self.assertEqual(out_len + added_hidden_states , len(_UpperCAmelCase))
__A : Optional[Any] = outputs.attentions
self.assertEqual(len(_UpperCAmelCase) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
def check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
__A : Tuple = model_class(_UpperCAmelCase)
model.to(_UpperCAmelCase)
model.eval()
with torch.no_grad():
__A : List[str] = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase))
__A : Optional[Any] = outputs.hidden_states
__A : List[str] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
# YOLOS has a different seq_length
__A : Dict = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
__A ,__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__A : List[str] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__A : Optional[int] = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*_UpperCAmelCase)
@slow
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : List[Any] = YolosModel.from_pretrained(_UpperCAmelCase)
self.assertIsNotNone(_UpperCAmelCase)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class YolosModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("hustvl/yolos-small") if is_vision_available() else None
    @slow
    def test_inference_object_detection_head(self):
        model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values)
        # verify outputs
        expected_shape = torch.Size((1, 100, 92))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]],
            device=torch_device,
        )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=torch_device
        )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice_logits, atol=1e-4))
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], expected_slice_boxes, atol=1e-4))
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs, threshold=0.3, target_sizes=[image.size[::-1]]
        )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(torch_device)
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(torch_device)
        self.assertEqual(len(results["scores"]), 5)
        self.assertTrue(torch.allclose(results["scores"], expected_scores, atol=1e-4))
        self.assertSequenceEqual(results["labels"].tolist(), expected_labels)
        self.assertTrue(torch.allclose(results["boxes"][0, :], expected_slice_boxes))
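
# A hedged end-to-end sketch mirroring the integration test above: detect
# objects in a PIL image with yolos-small and keep predictions above a 0.9
# score. The image path is the same COCO fixture used by prepare_img.
def _example_yolos_detection():
    import torch

    from PIL import Image
    from transformers import AutoImageProcessor, YolosForObjectDetection

    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    image_processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
    model = YolosForObjectDetection.from_pretrained("hustvl/yolos-small")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    results = image_processor.post_process_object_detection(
        outputs, threshold=0.9, target_sizes=[image.size[::-1]]
    )[0]
    for score, label in zip(results["scores"], results["labels"]):
        print(model.config.id2label[label.item()], round(score.item(), 3))
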
import unittest
from diffusers.models.unet_2d_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = DownBlock2D # noqa F405
a = "down"
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__lowerCamelCase )
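
# A minimal sketch of what UNetBlockTesterMixin exercises for this block: build
# a DownBlock2D directly and run one forward pass. The 32-channel / 128-temb
# sizes are assumptions chosen to mirror the defaults used in these tests.
def _example_down_block_forward():
    import torch

    block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)  # noqa: F405
    sample = torch.randn(1, 32, 16, 16)
    temb = torch.randn(1, 128)
    hidden_states, output_states = block(sample, temb)
    assert hidden_states.shape == (1, 32, 8, 8)  # downsampled by 2
    assert len(output_states) == 2  # one skip per resnet plus the downsampler output
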
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = ResnetDownsampleBlock2D # noqa F405
a = "down"
def lowercase_ ( self : Optional[int] ) -> List[str]:
SCREAMING_SNAKE_CASE__ = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnDownBlock2D # noqa F405
a = "down"
def lowercase_ ( self : Dict ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = CrossAttnDownBlock2D # noqa F405
a = "down"
def lowercase_ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
def lowercase_ ( self : Tuple ) -> int:
SCREAMING_SNAKE_CASE__ = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = SimpleCrossAttnDownBlock2D # noqa F405
a = "down"
@property
def lowercase_ ( self : int ) -> Union[str, Any]:
return super().get_dummy_input(include_encoder_hidden_states=__lowerCamelCase )
def lowercase_ ( self : int ) -> List[str]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowercase_ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = SkipDownBlock2D # noqa F405
a = "down"
@property
def lowercase_ ( self : Any ) -> Dict:
return super().get_dummy_input(include_skip_sample=__lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnSkipDownBlock2D # noqa F405
a = "down"
@property
def lowercase_ ( self : List[str] ) -> int:
return super().get_dummy_input(include_skip_sample=__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = DownEncoderBlock2D # noqa F405
a = "down"
@property
def lowercase_ ( self : Dict ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__lowerCamelCase )
def lowercase_ ( self : List[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnDownEncoderBlock2D # noqa F405
a = "down"
@property
def lowercase_ ( self : Dict ) -> Optional[int]:
return super().get_dummy_input(include_temb=__lowerCamelCase )
def lowercase_ ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 32,
'''out_channels''': 32,
}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Any ) -> Any:
SCREAMING_SNAKE_CASE__ = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlock2D # noqa F405
a = "mid"
def lowercase_ ( self : Optional[Any] ) -> Any:
SCREAMING_SNAKE_CASE__ = {
'''in_channels''': 32,
'''temb_channels''': 128,
}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Tuple ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlock2DCrossAttn # noqa F405
a = "mid"
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
def lowercase_ ( self : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = UNetMidBlock2DSimpleCrossAttn # noqa F405
a = "mid"
@property
def lowercase_ ( self : Optional[int] ) -> List[Any]:
return super().get_dummy_input(include_encoder_hidden_states=__lowerCamelCase )
def lowercase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
def lowercase_ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = UpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def lowercase_ ( self : int ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = ResnetUpsampleBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Optional[Any] ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def lowercase_ ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = CrossAttnUpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : int ) -> List[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> Dict:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
def lowercase_ ( self : int ) -> Dict:
SCREAMING_SNAKE_CASE__ = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = SimpleCrossAttnUpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Tuple ) -> Optional[Any]:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase , include_encoder_hidden_states=__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = super().prepare_init_args_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 32
return init_dict, inputs_dict
def lowercase_ ( self : Any ) -> Dict:
SCREAMING_SNAKE_CASE__ = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnUpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Optional[Any] ) -> Optional[int]:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
@unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' )
def lowercase_ ( self : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = SkipUpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Union[str, Any] ) -> List[str]:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnSkipUpBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : Union[str, Any] ) -> Tuple:
return super().get_dummy_input(include_res_hidden_states_tuple=__lowerCamelCase )
def lowercase_ ( self : int ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = UpDecoderBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : List[str] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__lowerCamelCase )
def lowercase_ ( self : Tuple ) -> Dict:
SCREAMING_SNAKE_CASE__ = {'''in_channels''': 32, '''out_channels''': 32}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : Union[str, Any] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__lowerCamelCase )
class UpperCAmelCase__ ( A__ , unittest.TestCase ):
"""simple docstring"""
a = AttnUpDecoderBlock2D # noqa F405
a = "up"
@property
def lowercase_ ( self : List[Any] ) -> Union[str, Any]:
return super().get_dummy_input(include_temb=__lowerCamelCase )
def lowercase_ ( self : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = {'''in_channels''': 32, '''out_channels''': 32}
SCREAMING_SNAKE_CASE__ = self.dummy_input
return init_dict, inputs_dict
def lowercase_ ( self : List[str] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__lowerCamelCase )
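
# Unlike the temb-conditioned blocks above, the VAE-style encoder/decoder
# blocks (DownEncoderBlock2D, UpDecoderBlock2D, ...) take no timestep
# embedding, which is why their tests pass include_temb=False. A hedged sketch:
def _example_decoder_block_forward():
    import torch

    block = UpDecoderBlock2D(in_channels=32, out_channels=32)  # noqa: F405
    sample = torch.randn(1, 32, 8, 8)
    hidden_states = block(sample)
    assert hidden_states.shape == (1, 32, 16, 16)  # upsampled by 2
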
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
def lowercase_ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self : List[str] ) -> Dict:
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowercase_ ( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Union[str, Any] ) -> int:
SCREAMING_SNAKE_CASE__ = BitModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowercase_ ( self : str , __lowerCamelCase : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = BitForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = BitBackbone(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(__lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
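
# A hedged sketch of the backbone contract checked above: BitBackbone returns
# one feature map per requested stage, and model.channels mirrors the selected
# hidden sizes. The config values are the tester defaults; num_groups=1 keeps
# group normalization valid for these tiny channel counts.
def _example_bit_backbone():
    config = BitConfig(
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        num_groups=1,
        out_features=["stage2", "stage3", "stage4"],
    )
    model = BitBackbone(config)
    model.eval()
    with torch.no_grad():
        outputs = model(torch.randn(1, 3, 32, 32))
    assert len(outputs.feature_maps) == len(config.out_features)
    assert model.channels == [16, 32, 64]
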
@require_torch
class BitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BitModel, "image-classification": BitForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def lowercase_ ( self : Optional[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BitModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase )
def lowercase_ ( self : Union[str, Any] ) -> str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self : str ) -> Dict:
return
@unittest.skip(reason='''Bit does not output attentions''' )
def lowercase_ ( self : Any ) -> Dict:
pass
@unittest.skip(reason='''Bit does not use inputs_embeds''' )
def lowercase_ ( self : Tuple ) -> Optional[int]:
pass
@unittest.skip(reason='''Bit does not support input and output embeddings''' )
def lowercase_ ( self : Optional[int] ) -> str:
pass
def lowercase_ ( self : str ) -> str:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def lowercase_ ( self : Dict ) -> Tuple:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowercase_ ( self : Any ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCamelCase )
def lowercase_ ( self : Any ) -> str:
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(config=__lowerCamelCase )
for name, module in model.named_modules():
if isinstance(__lowerCamelCase , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def lowercase_ ( self : str ) -> Optional[Any]:
def check_hidden_states_output(__lowerCamelCase : List[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
SCREAMING_SNAKE_CASE__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ = self.model_tester.num_stages
self.assertEqual(len(__lowerCamelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = ['''preactivation''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
SCREAMING_SNAKE_CASE__ = layer_type
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
@unittest.skip(reason='''Bit does not use feedforward chunking''' )
def lowercase_ ( self : List[str] ) -> Dict:
pass
def lowercase_ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowercase_ ( self : Optional[Any] ) -> Dict:
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = BitModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowercase_ ( self : List[Any] ) -> List[Any]:
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowercase_ ( self : str ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = self.default_image_processor
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**__lowerCamelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor([[-0.6526, -0.5263, -1.4398]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1e-4 ) )
@require_torch
class BitBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (BitBackbone,) if is_torch_available() else ()
    config_class = BitConfig
    has_attentions = False

    def setUp(self):
        self.model_tester = BitModelTester(self)
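
# A hedged sketch of the classification path exercised by the integration test
# above; BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] is assumed to resolve to the
# public "google/bit-50" checkpoint.
def _example_bit_classification():
    image = prepare_img()
    image_processor = BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])
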
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __UpperCAmelCase ( unittest.TestCase ):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
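
    # Worked numeric example (illustrative): with logits [1.0, 2.0], softmax gives
    # roughly [0.27, 0.73]. A temperature of 0.5 rescales the logits to [2.0, 4.0],
    # sharpening the distribution to about [0.12, 0.88], while a temperature of 1.3
    # flattens it back toward uniform.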
def UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
a__ : List[Any] = None
a__ : int = 10
a__ : List[str] = 2
# create ramp distribution
a__ : str = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, vocab_size) ).copy()
a__ : Tuple = ramp_logits[1:, : vocab_size // 2] + vocab_size
a__ : Union[str, Any] = FlaxTopKLogitsWarper(3 )
a__ : List[str] = top_k_warp(a_ , a_ , cur_len=a_ )
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] )
self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] )
# check special case
a__ : Any = 5
a__ : Optional[int] = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 )
a__ : List[str] = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, length) ).copy()
a__ : int = top_k_warp_safety_check(a_ , a_ , cur_len=a_ )
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] )
def UpperCAmelCase ( self : Optional[Any] ) -> Any:
'''simple docstring'''
a__ : Any = None
a__ : List[str] = 10
a__ : int = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
a__ : Optional[Any] = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) )
a__ : Any = FlaxTopPLogitsWarper(0.8 )
a__ : List[Any] = np.exp(top_p_warp(a_ , a_ , cur_len=a_ ) )
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
a__ : Tuple = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] )
self.assertTrue(np.allclose(a_ , a_ , atol=1E-3 ) )
# check edge cases with negative and extreme logits
a__ : Union[str, Any] = np.broadcast_to(np.arange(a_ )[None, :] , (batch_size, vocab_size) ).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
a__ : Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
a__ : List[str] = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 )
a__ : Any = top_p_warp(a_ , a_ , cur_len=a_ )
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] )
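
# Worked numeric example (illustrative): with probabilities [0.3, 0.1, 0.1, 0.5]
# and top_p=0.8, sorting descending gives 0.5 + 0.3 = 0.8 >= top_p, so only those
# two tokens survive; the rest are filtered out, matching the expected array above.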
def UpperCAmelCase ( self : str ) -> Optional[Any]:
'''simple docstring'''
a__ : Optional[Any] = 20
a__ : str = 4
a__ : int = 0
a__ : Any = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
# check that min length is applied at length 5
a__ : Optional[int] = ids_tensor((batch_size, 20) , vocab_size=20 )
a__ : Optional[Any] = 5
a__ : int = self._get_uniform_logits(a_ , a_ )
a__ : str = min_dist_processor(a_ , a_ , cur_len=a_ )
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] )
# check that min length is not applied anymore at length 15
a__ : List[Any] = self._get_uniform_logits(a_ , a_ )
a__ : Optional[int] = 15
a__ : Optional[int] = min_dist_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def UpperCAmelCase ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
a__ : List[Any] = 20
a__ : Tuple = 4
a__ : Optional[int] = 0
a__ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
# check that all scores are -inf except the bos_token_id score
a__ : Any = ids_tensor((batch_size, 1) , vocab_size=20 )
a__ : Union[str, Any] = 1
a__ : Dict = self._get_uniform_logits(a_ , a_ )
a__ : List[str] = logits_processor(a_ , a_ , cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() )
        self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
a__ : str = 3
a__ : Optional[int] = self._get_uniform_logits(a_ , a_ )
a__ : Optional[int] = logits_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def UpperCAmelCase ( self : str ) -> List[Any]:
'''simple docstring'''
a__ : Union[str, Any] = 20
a__ : Tuple = 4
a__ : List[Any] = 0
a__ : Optional[Any] = 5
a__ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
# check that all scores are -inf except the eos_token_id when max_length is reached
a__ : Tuple = ids_tensor((batch_size, 4) , vocab_size=20 )
a__ : Tuple = 4
a__ : List[str] = self._get_uniform_logits(a_ , a_ )
a__ : List[Any] = logits_processor(a_ , a_ , cur_len=a_ )
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() )
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
a__ : List[Any] = 3
a__ : Optional[int] = self._get_uniform_logits(a_ , a_ )
a__ : Tuple = logits_processor(a_ , a_ , cur_len=a_ )
self.assertFalse(jnp.isinf(a_ ).any() )
def UpperCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
a__ : Optional[int] = 4
a__ : List[Any] = 10
a__ : Any = 15
a__ : Optional[int] = 2
a__ : List[str] = 1
a__ : Optional[Any] = 15
# dummy input_ids and scores
a__ : Union[str, Any] = ids_tensor((batch_size, sequence_length) , a_ )
a__ : List[Any] = input_ids.copy()
a__ : List[str] = self._get_uniform_logits(a_ , a_ )
a__ : int = scores.copy()
# instantiate all dist processors
a__ : Any = FlaxTemperatureLogitsWarper(temperature=0.5 )
a__ : Any = FlaxTopKLogitsWarper(3 )
a__ : Optional[int] = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a__ : Tuple = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
a__ : List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
a__ : int = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
a__ : str = 10
# no processor list
a__ : Tuple = temp_dist_warp(a_ , a_ , cur_len=a_ )
a__ : Dict = top_k_warp(a_ , a_ , cur_len=a_ )
a__ : Optional[Any] = top_p_warp(a_ , a_ , cur_len=a_ )
a__ : Tuple = min_dist_proc(a_ , a_ , cur_len=a_ )
a__ : Dict = bos_dist_proc(a_ , a_ , cur_len=a_ )
a__ : Union[str, Any] = eos_dist_proc(a_ , a_ , cur_len=a_ )
# with processor list
a__ : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a__ : List[Any] = processor(a_ , a_ , cur_len=a_ )
# scores should be equal
self.assertTrue(jnp.allclose(a_ , a_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
def UpperCAmelCase ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] = 4
a__ : Optional[Any] = 10
a__ : str = 15
a__ : Any = 2
a__ : Dict = 1
a__ : Optional[Any] = 15
# dummy input_ids and scores
a__ : Any = ids_tensor((batch_size, sequence_length) , a_ )
a__ : Tuple = input_ids.copy()
a__ : Dict = self._get_uniform_logits(a_ , a_ )
a__ : Optional[int] = scores.copy()
# instantiate all dist processors
a__ : Union[str, Any] = FlaxTemperatureLogitsWarper(temperature=0.5 )
a__ : List[Any] = FlaxTopKLogitsWarper(3 )
a__ : str = FlaxTopPLogitsWarper(0.8 )
# instantiate all logits processors
a__ : int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=a_ )
a__ : Optional[int] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=a_ )
a__ : Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=a_ , eos_token_id=a_ )
a__ : Optional[int] = 10
# no processor list
def run_no_processor_list(a_ : Optional[Any] , a_ : List[str] , a_ : List[Any] ):
a__ : Union[str, Any] = temp_dist_warp(a_ , a_ , cur_len=a_ )
a__ : List[str] = top_k_warp(a_ , a_ , cur_len=a_ )
a__ : List[str] = top_p_warp(a_ , a_ , cur_len=a_ )
a__ : Optional[Any] = min_dist_proc(a_ , a_ , cur_len=a_ )
a__ : List[str] = bos_dist_proc(a_ , a_ , cur_len=a_ )
a__ : Dict = eos_dist_proc(a_ , a_ , cur_len=a_ )
return scores
# with processor list
def run_processor_list(a_ : List[str] , a_ : Dict , a_ : Dict ):
a__ : str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] )
a__ : int = processor(a_ , a_ , cur_len=a_ )
return scores
a__ : Any = jax.jit(a_ )
a__ : List[Any] = jax.jit(a_ )
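        # jit-compiling both code paths also verifies that every processor is traceable
        # by XLA and that compilation leaves the numerics unchanged.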
a__ : Optional[Any] = jitted_run_no_processor_list(a_ , a_ , a_ )
a__ : List[Any] = jitted_run_processor_list(a_ , a_ , a_ )
# scores should be equal
self.assertTrue(jnp.allclose(a_ , a_ , atol=1E-3 ) )
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) | 642 |
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any]=None ) -> str:
'''simple docstring'''
# set parameter of one layer
assert torch_layer.weight.shape == weight.shape, F"{torch_layer} layer.weight does not match"
a__ : Optional[Any] = nn.Parameter(lowerCAmelCase__ )
if bias is not None:
assert torch_layer.bias.shape == bias.shape, F"{torch_layer} layer.bias does not match"
a__ : str = nn.Parameter(lowerCAmelCase__ )
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
# set torch weights for 1-to-1 comparison
a__ : Tuple = np.asarray(weights[0] )
a__ : Any = np.asarray(weights[1] )
a__ : Optional[Any] = np.asarray(weights[2] )
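    # trax appears to store attention weights per head, roughly (num_heads, hidden_size, head_dim);
    # the transpose + view below flattens them into the single 2D matrices that torch's
    # linear layers expect (layout inferred from the reshape pattern, not documented here).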
set_param(
torch_layer.self_attention.query_key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowercase__ ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> str:
'''simple docstring'''
# set torch weights for 1-to-1 comparison
a__ : int = np.asarray(weights[0] )
a__ : List[str] = np.asarray(weights[1] )
a__ : Optional[int] = np.asarray(weights[2] )
a__ : int = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(lowerCAmelCase__ ).transpose(1 , 2 ).contiguous().view(-1 , lowerCAmelCase__ ) , )
set_param(
torch_layer.output.dense , torch.tensor(lowerCAmelCase__ ).view(-1 , lowerCAmelCase__ ).contiguous().transpose(0 , 1 ) , )
def lowercase__ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
# layernorm 1
a__ : int = weights[0][0][0]
a__ : Optional[Any] = np.asarray(layer_norm_a[0] )
a__ : Optional[int] = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# lsh weights + output
a__ : Dict = weights[0][1]
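    # LSH attention layers carry 3 tensors (shared query_key, value, output) while
    # local attention layers carry 4 (separate query, key, value, output), so the
    # tensor count below distinguishes the two layer types.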
if len(lowerCAmelCase__ ) < 4:
set_layer_weights_in_torch_lsh(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
else:
set_layer_weights_in_torch_local(lowerCAmelCase__ , torch_block.attention , lowerCAmelCase__ )
    # intermediate weights
a__ : Optional[int] = weights[2][0][1][2]
# Chunked Feed Forward
if len(lowerCAmelCase__ ) == 4:
a__ : List[str] = intermediate_weights[2]
# layernorm 2
a__ : List[Any] = np.asarray(intermediate_weights[0][0] )
a__ : Optional[int] = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# intermediate dense
a__ : Tuple = np.asarray(intermediate_weights[1][0] )
a__ : Union[str, Any] = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
# intermediate out
a__ : Optional[Any] = np.asarray(intermediate_weights[4][0] )
a__ : Dict = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def lowercase__ ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
'''simple docstring'''
# reformer model
a__ : int = torch_model.reformer
# word embeds
a__ : List[Any] = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(lowerCAmelCase__ ) , )
if isinstance(weights[3] , lowerCAmelCase__ ):
a__ : Dict = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
a__ : str = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), F"{position_embeddings[emb_idx]} emb does not match"
a__ : List[str] = nn.Parameter(torch.tensor(lowerCAmelCase__ ) )
a__ : Optional[Any] = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
lowerCAmelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
a__ : Union[str, Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# output layer norm
a__ : str = np.asarray(weights[7][0] )
a__ : List[Any] = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(lowerCAmelCase__ ) , torch.tensor(lowerCAmelCase__ ) , )
# output embeddings
a__ : str = np.asarray(weights[9][0] )
a__ : Optional[int] = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(lowerCAmelCase__ ).transpose(0 , 1 ).contiguous() , torch.tensor(lowerCAmelCase__ ) , )
def lowercase__ ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
'''simple docstring'''
# Initialise PyTorch model
a__ : List[Any] = ReformerConfig.from_json_file(lowerCAmelCase__ )
print(F"Building PyTorch model from configuration: {config}" )
a__ : Optional[Any] = ReformerModelWithLMHead(lowerCAmelCase__ )
with open(lowerCAmelCase__ , "rb" ) as f:
a__ : List[str] = pickle.load(lowerCAmelCase__ )["weights"]
set_model_weights_in_torch(lowerCAmelCase__ , lowerCAmelCase__ , config.hidden_size )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
torch.save(model.state_dict() , lowerCAmelCase__ )
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle file.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__UpperCAmelCase = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 642 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a__ : Dict =logging.get_logger(__name__)
a__ : str ={
"microsoft/swin-tiny-patch4-window7-224": (
"https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str ='''swin'''
SCREAMING_SNAKE_CASE_ : Optional[int] ={
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self : str , __A : Optional[int]=2_2_4 , __A : int=4 , __A : int=3 , __A : Optional[Any]=9_6 , __A : Union[str, Any]=[2, 2, 6, 2] , __A : Union[str, Any]=[3, 6, 1_2, 2_4] , __A : int=7 , __A : Optional[int]=4.0 , __A : Any=True , __A : List[str]=0.0 , __A : Dict=0.0 , __A : int=0.1 , __A : Dict="gelu" , __A : Tuple=False , __A : Optional[Any]=0.02 , __A : int=1e-5 , __A : List[str]=3_2 , __A : List[str]=None , __A : Dict=None , **__A : Any , ):
super().__init__(**A_ )
__UpperCamelCase = image_size
__UpperCamelCase = patch_size
__UpperCamelCase = num_channels
__UpperCamelCase = embed_dim
__UpperCamelCase = depths
__UpperCamelCase = len(A_ )
__UpperCamelCase = num_heads
__UpperCamelCase = window_size
__UpperCamelCase = mlp_ratio
__UpperCamelCase = qkv_bias
__UpperCamelCase = hidden_dropout_prob
__UpperCamelCase = attention_probs_dropout_prob
__UpperCamelCase = drop_path_rate
__UpperCamelCase = hidden_act
__UpperCamelCase = use_absolute_embeddings
__UpperCamelCase = layer_norm_eps
__UpperCamelCase = initializer_range
__UpperCamelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCamelCase = int(embed_dim * 2 ** (len(A_ ) - 1) )
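        # e.g. the default embed_dim=96 with 4 stages gives hidden_size = 96 * 2**3 = 768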
__UpperCamelCase = ['stem'] + [f'''stage{idx}''' for idx in range(1 , len(A_ ) + 1 )]
__UpperCamelCase , __UpperCamelCase = get_aligned_output_features_output_indices(
out_features=A_ , out_indices=A_ , stage_names=self.stage_names )
class snake_case ( _UpperCAmelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] =version.parse("1.11" )
@property
def _lowerCamelCase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self : Optional[Any] ):
return 1e-4
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
a__ : str ={
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple =[
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
a__ : Union[str, Any] =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 434 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class snake_case_ ( __A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = "dpt"
def __init__( self : Optional[Any] , _UpperCamelCase : Tuple=7_6_8 , _UpperCamelCase : Dict=1_2 , _UpperCamelCase : Union[str, Any]=1_2 , _UpperCamelCase : List[Any]=3_0_7_2 , _UpperCamelCase : Dict="gelu" , _UpperCamelCase : Union[str, Any]=0.0 , _UpperCamelCase : Optional[int]=0.0 , _UpperCamelCase : Optional[int]=0.02 , _UpperCamelCase : List[str]=1e-12 , _UpperCamelCase : Any=3_8_4 , _UpperCamelCase : int=1_6 , _UpperCamelCase : Any=3 , _UpperCamelCase : Dict=False , _UpperCamelCase : str=True , _UpperCamelCase : Union[str, Any]=[2, 5, 8, 1_1] , _UpperCamelCase : List[str]="project" , _UpperCamelCase : Optional[int]=[4, 2, 1, 0.5] , _UpperCamelCase : Dict=[9_6, 1_9_2, 3_8_4, 7_6_8] , _UpperCamelCase : Dict=2_5_6 , _UpperCamelCase : Optional[Any]=-1 , _UpperCamelCase : int=False , _UpperCamelCase : Optional[int]=True , _UpperCamelCase : str=0.4 , _UpperCamelCase : Tuple=2_5_5 , _UpperCamelCase : Union[str, Any]=0.1 , _UpperCamelCase : Tuple=[1, 1_0_2_4, 2_4, 2_4] , _UpperCamelCase : List[str]=[0, 1] , _UpperCamelCase : List[Any]=None , **_UpperCamelCase : Dict , ) ->Any:
super().__init__(**_UpperCamelCase )
snake_case_ = hidden_size
snake_case_ = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
snake_case_ = BitConfig(**_UpperCamelCase )
elif isinstance(_UpperCamelCase , _UpperCamelCase ):
snake_case_ = backbone_config
else:
raise ValueError(
f'''backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.''' )
snake_case_ = backbone_featmap_shape
snake_case_ = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
snake_case_ = None
snake_case_ = None
snake_case_ = []
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = initializer_range
snake_case_ = layer_norm_eps
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = qkv_bias
snake_case_ = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
snake_case_ = readout_type
snake_case_ = reassemble_factors
snake_case_ = neck_hidden_sizes
snake_case_ = fusion_hidden_size
snake_case_ = head_in_index
snake_case_ = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
snake_case_ = use_auxiliary_head
snake_case_ = auxiliary_loss_weight
snake_case_ = semantic_loss_ignore_index
snake_case_ = semantic_classifier_dropout
def snake_case__( self : List[str] ) ->List[Any]:
snake_case_ = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
snake_case_ = self.backbone_config.to_dict()
snake_case_ = self.__class__.model_type
return output | 39 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def __lowerCamelCase ( __a :int ) -> int:
"""simple docstring"""
A__ = 3_8_4
if "tiny" in model_name:
A__ = [3, 3, 9, 3]
A__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "small" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [9_6, 1_9_2, 3_8_4, 7_6_8]
if "base" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
A__ = 5_1_2
if "large" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
A__ = 7_6_8
if "xlarge" in model_name:
A__ = [3, 3, 2_7, 3]
A__ = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
A__ = 1_0_2_4
# set label information
A__ = 1_5_0
A__ = """huggingface/label-files"""
A__ = """ade20k-id2label.json"""
A__ = json.load(open(hf_hub_download(__a , __a , repo_type="""dataset""" ) , """r""" ) )
A__ = {int(__a ): v for k, v in idalabel.items()}
A__ = {v: k for k, v in idalabel.items()}
A__ = ConvNextConfig(
depths=__a , hidden_sizes=__a , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
A__ = UperNetConfig(
        backbone_config=__a , auxiliary_in_channels=__a , num_labels=__a , id2label=__a , label2id=__a , )
return config
def __lowerCamelCase ( __a :Union[str, Any] ) -> List[Any]:
"""simple docstring"""
A__ = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def __lowerCamelCase ( __a :Union[str, Any] , __a :List[str] , __a :str ) -> str:
"""simple docstring"""
A__ = dct.pop(__a )
A__ = val
def __lowerCamelCase ( __a :Any , __a :int , __a :Any ) -> List[Any]:
"""simple docstring"""
A__ = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
A__ = model_name_to_url[model_name]
A__ = torch.hub.load_state_dict_from_url(__a , map_location="""cpu""" )["""state_dict"""]
A__ = get_upernet_config(__a )
A__ = UperNetForSemanticSegmentation(__a )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
A__ = state_dict.pop(__a )
if "bn" in key:
A__ = key.replace("""bn""" , """batch_norm""" )
A__ = val
# rename keys
A__ = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a , __a , __a )
model.load_state_dict(__a )
# verify on image
A__ = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
A__ = Image.open(requests.get(__a , stream=__a ).raw ).convert("""RGB""" )
A__ = SegformerImageProcessor()
A__ = processor(__a , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
A__ = model(__a )
if model_name == "upernet-convnext-tiny":
A__ = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
A__ = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
A__ = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
A__ = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
A__ = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , __a , atol=1E-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__a )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__a )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
A : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-convnext-tiny''',
type=str,
choices=[F'''upernet-convnext-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''', '''xlarge''']],
help='''Name of the ConvNext UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A : str = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 176 | 0 |
from statistics import mean
import numpy as np
def __magic_name__ ( A : list, A : list, A : list, A : int ):
'''simple docstring'''
a = 0
# Number of processes finished
a = 0
    # Tracks whether each process has finished:
    # 0 means the process has not run yet, 1 means it has already completed.
a = [0] * no_of_process
# List to include calculation results
a = [0] * no_of_process
# Sort by arrival time.
a = [burst_time[i] for i in np.argsort(A )]
a = [process_name[i] for i in np.argsort(A )]
arrival_time.sort()
while no_of_process > finished_process_count:
a = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
a = arrival_time[i]
a = 0
# Index showing the location of the process being performed
a = 0
# Saves the current response ratio.
a = 0
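        # HRRN: response ratio = (burst time + waiting time) / burst time; the ready,
        # unfinished process with the highest ratio runs next, favoring short jobs
        # while letting a long job's ratio grow as it waits (no starvation).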
for i in range(0, A ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
a = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
a = temp
a = i
        # Calculate the turnaround time
a = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
a = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __magic_name__ ( A : list, A : list, A : list, A : int ):
'''simple docstring'''
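    # waiting time = turnaround time - burst time
    # (turnaround itself is completion time - arrival time)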
a = [0] * no_of_process
for i in range(0, A ):
a = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__lowerCAmelCase : str = 5
__lowerCAmelCase : int = ['A', 'B', 'C', 'D', 'E']
__lowerCAmelCase : Union[str, Any] = [1, 2, 3, 4, 5]
__lowerCAmelCase : Optional[Any] = [1, 2, 3, 4, 5]
__lowerCAmelCase : Union[str, Any] = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__lowerCAmelCase : Any = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print('Process name \tArrival time \tBurst time \tTurn around time \tWaiting time')
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
| 716 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case__ (_UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = LongformerTokenizer
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = LongformerTokenizerFast
SCREAMING_SNAKE_CASE_ : str = True
def __UpperCAmelCase ( self : Optional[int] ) -> str:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
a = {"unk_token": "<unk>"}
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__lowerCamelCase ) )
def __UpperCAmelCase ( self : Dict , **__lowerCamelCase : Dict ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Any ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[Any] ) -> Union[str, Any]:
a = "lower newer"
a = "lower newer"
return input_text, output_text
def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
a = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
a = "lower newer"
a = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
a = tokenizer.tokenize(__lowerCamelCase ) # , add_prefix_space=True)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
a = tokens + [tokenizer.unk_token]
a = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , __lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Tuple:
a = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=__lowerCamelCase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
a = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
a = tokenizer.encode("sequence builders" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode("multi-sequence build" , add_special_tokens=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
a = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __UpperCAmelCase ( self : Any ) -> str:
a = self.get_tokenizer()
a = "Encode this sequence."
a = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase , add_prefix_space=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
a = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
# Testing spaces after special tokens
a = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase )} ) # mask token has a left space
a = tokenizer.convert_tokens_to_ids(__lowerCamelCase )
a = "Encode <mask> sequence"
a = "Encode <mask>sequence"
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(__lowerCamelCase , __lowerCamelCase )
a = tokenizer.encode(__lowerCamelCase )
a = encoded.index(__lowerCamelCase )
a = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
pass
def __UpperCAmelCase ( self : int ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = self.rust_tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = self.tokenizer_class.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
a = "A, <mask> AllenNLP sentence."
a = tokenizer_r.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
a = tokenizer_p.encode_plus(__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_token_type_ids=__lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
a = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
a = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
__lowerCamelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
a = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
a = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["add_prefix_space"] , __lowerCamelCase )
self.assertEqual(post_processor_state["trim_offsets"] , __lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ) -> Dict:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
a = f"""{text_of_1_token} {text_of_1_token}"""
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ) + 1, len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(__lowerCamelCase ), len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ) + 1, 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
a = self.rust_tokenizer_class.from_pretrained(
__lowerCamelCase , use_fast=__lowerCamelCase , add_prefix_space=__lowerCamelCase , trim_offsets=__lowerCamelCase )
a = tokenizer_r(__lowerCamelCase , return_offsets_mapping=__lowerCamelCase , add_special_tokens=__lowerCamelCase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(__lowerCamelCase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(__lowerCamelCase ), 1 + len(__lowerCamelCase ) + 1 + len(__lowerCamelCase )) , )
| 662 | 0 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase :
def __init__( self , snake_case , snake_case=3 , snake_case=32 , snake_case=3 , snake_case=10 , snake_case=[10, 20, 30, 40] , snake_case=[1, 1, 2, 1] , snake_case=True , snake_case=True , snake_case="relu" , snake_case=3 , snake_case=None , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = num_channels
snake_case_ = embeddings_size
snake_case_ = hidden_sizes
snake_case_ = depths
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_act
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = len(_UpperCAmelCase )
def a ( self ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.num_labels )
snake_case_ = self.get_config()
return config, pixel_values, labels
def a ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def a ( self , snake_case , snake_case , snake_case ):
snake_case_ = TFResNetModel(config=_UpperCAmelCase )
snake_case_ = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def a ( self , snake_case , snake_case , snake_case ):
snake_case_ = self.num_labels
snake_case_ = TFResNetForImageClassification(_UpperCAmelCase )
snake_case_ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a ( self ):
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : int = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
__SCREAMING_SNAKE_CASE : Union[str, Any] = (
{'''feature-extraction''': TFResNetModel, '''image-classification''': TFResNetForImageClassification}
if is_tf_available()
else {}
)
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = False
__SCREAMING_SNAKE_CASE : Any = False
__SCREAMING_SNAKE_CASE : List[Any] = False
def a ( self ):
snake_case_ = TFResNetModelTester(self )
snake_case_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def a ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a ( self ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def a ( self ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def a ( self ):
pass
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a ( self ):
def check_hidden_states_output(snake_case , snake_case , snake_case ):
snake_case_ = model_class(_UpperCAmelCase )
snake_case_ = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
snake_case_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
snake_case_ = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ = ['''basic''', '''bottleneck''']
for model_class in self.all_model_classes:
for layer_type in layers_type:
snake_case_ = layer_type
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def a ( self ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def a ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = TFResNetModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ):
'''simple docstring'''
snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def a ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def a ( self ):
snake_case_ = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=_UpperCAmelCase , return_tensors='tf' )
# forward pass
snake_case_ = model(**_UpperCAmelCase )
# verify the logits
snake_case_ = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
snake_case_ = tf.constant([-11.1069, -9.78_77, -8.37_77] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1e-4 ) )
| 362 |
"""simple docstring"""
from __future__ import annotations
def __A ( nums: list[int]) -> int:
    # maximum sum of non-adjacent elements (house-robber recurrence)
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_excluding, max_including)
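# Example: for [3, 2, 7, 10] the best non-adjacent selection is 3 + 10 = 13.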
if __name__ == "__main__":
import doctest
doctest.testmod() | 52 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
from collections.abc import Sequence
from random import randint
from matplotlib import pyplot as plt
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Sequence[float], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> tuple[int | None, int | None, float]:
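    # Divide and conquer: the best subarray lies entirely in the left half, entirely
    # in the right half, or crosses the midpoint; solve the halves recursively and
    # compare the three candidates.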
if not arr:
return None, None, 0
if low == high:
return low, high, arr[low]
UpperCAmelCase_ : Optional[Any] = (low + high) // 2
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Dict = max_subarray(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = max_subarray(SCREAMING_SNAKE_CASE__, mid + 1, SCREAMING_SNAKE_CASE__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = max_cross_sum(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if left_sum >= right_sum and left_sum >= cross_sum:
return left_low, left_high, left_sum
elif right_sum >= left_sum and right_sum >= cross_sum:
return right_low, right_high, right_sum
return cross_left, cross_right, cross_sum
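# Example: max_subarray([1, -2, 3, 4, -1], 0, 4) returns (2, 3, 7) -- the slice 3 + 4.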
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : Sequence[float], SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int, SCREAMING_SNAKE_CASE__ : int ) -> tuple[int, int, float]:
UpperCAmelCase_ , UpperCAmelCase_ : Dict = float('''-inf''' ), -1
UpperCAmelCase_ , UpperCAmelCase_ : Any = float('''-inf''' ), -1
UpperCAmelCase_ : int | float = 0
for i in range(SCREAMING_SNAKE_CASE__, low - 1, -1 ):
summ += arr[i]
if summ > left_sum:
UpperCAmelCase_ : int = summ
UpperCAmelCase_ : Dict = i
UpperCAmelCase_ : int = 0
for i in range(mid + 1, high + 1 ):
summ += arr[i]
if summ > right_sum:
UpperCAmelCase_ : List[str] = summ
UpperCAmelCase_ : Optional[int] = i
return max_left, max_right, (left_sum + right_sum)
def lowerCamelCase_ ( SCREAMING_SNAKE_CASE__ : int ) -> float:
UpperCAmelCase_ : List[Any] = [randint(1, SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
UpperCAmelCase_ : int = time.time()
max_subarray(SCREAMING_SNAKE_CASE__, 0, input_size - 1 )
UpperCAmelCase_ : Union[str, Any] = time.time()
return end - start
def lowerCamelCase_ ( ) -> None:
UpperCAmelCase_ : Optional[int] = [10, 100, 1000, 10000, 50000, 100000, 200000, 300000, 400000, 500000]
UpperCAmelCase_ : str = [time_max_subarray(SCREAMING_SNAKE_CASE__ ) for input_size in input_sizes]
print('''No of Inputs\t\tTime Taken''' )
for input_size, runtime in zip(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__, '''\t\t''', SCREAMING_SNAKE_CASE__ )
plt.plot(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
plt.xlabel('''Number of Inputs''' )
plt.ylabel('''Time taken in seconds''' )
plt.show()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 644 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class __a (unittest.TestCase ):
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        # For consistency across the different places where the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCAmelCase_ : List[str] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
self.assertTrue(isinstance(dc.token_ids , __magic_name__ ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def UpperCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCAmelCase_ : Tuple = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(__magic_name__ ):
DisjunctiveConstraint(__magic_name__ ) # fails here
def UpperCAmelCase__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]]
UpperCAmelCase_ : List[str] = DisjunctiveConstraint(__magic_name__ )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(1 )
UpperCAmelCase_ : Dict = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : str = dc.update(2 )
UpperCAmelCase_ : Optional[Any] = stepped is True and completed is False and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = dc.update(3 )
UpperCAmelCase_ : Dict = stepped is True and completed is True and reset is False
self.assertTrue(__magic_name__ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def UpperCAmelCase__ ( self : int ) -> Dict:
"""simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
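# A minimal stepping sketch outside the unittest harness (uses only the API
# exercised above; the nested token lists are arbitrary examples, and torch
# must be available for the import guard at the top of this file):
if __name__ == "__main__":
    constraint = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    for token in (1, 2, 4):
        stepped, completed, reset = constraint.update(token)
    print(constraint.completed)  # True: [1, 2, 4] fulfills one branch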
| 644 | 1 |
'''simple docstring'''
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str] , size: int ) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str ) -> str:
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ""
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table(key: str ) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode(plaintext: str , key: str ) -> str:
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext , 2 ):
        row1 , col1 = divmod(table.index(char1 ) , 5 )
        row2 , col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str , key: str ) -> str:
    table = generate_table(key )
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext , 2 ):
        row1 , col1 = divmod(table.index(char1 ) , 5 )
        row2 , col2 = divmod(table.index(char2 ) , 5 )
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
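# Quick round-trip sketch using the functions above (key and message are
# arbitrary examples; decoding returns the prepared, X-padded plaintext):
if __name__ == "__main__":
    ciphertext = encode("Hide the gold in the tree stump", "playfair example")
    print(ciphertext)
    print(decode(ciphertext, "playfair example"))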
| 489 |
from __future__ import annotations
def check_polygon(nums: list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError('''Monogons and Digons are not polygons in the Euclidean space''' )
    if any(i <= 0 for i in nums ):
        raise ValueError('''All values must be greater than 0''' )
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
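    # Worked example (a sketch): sides (3, 4, 5) close into a polygon because
    # the longest side satisfies 5 < 3 + 4, while (1, 1, 3) cannot close.
    print(check_polygon([3.0, 4.0, 5.0]))  # True
    print(check_polygon([1.0, 1.0, 3.0]))  # False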
| 175 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_bigbird_pegasus""": [
"""BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BigBirdPegasusConfig""",
"""BigBirdPegasusOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
"""BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BigBirdPegasusForCausalLM""",
"""BigBirdPegasusForConditionalGeneration""",
"""BigBirdPegasusForQuestionAnswering""",
"""BigBirdPegasusForSequenceClassification""",
"""BigBirdPegasusModel""",
"""BigBirdPegasusPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 370 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
SCREAMING_SNAKE_CASE_ = False
class snake_case_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests( unittest.TestCase ):
    def test_inference_image_variations( self ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion" )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg" )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type="numpy" , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0441, 0.0469, 0.0507, 0.0575, 0.0632, 0.0650, 0.0865, 0.0909, 0.0945] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 370 | 1 |
"""simple docstring"""
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")
class BartphoTokenizerTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp( self ):
        '''simple docstring'''
        super().setUp()
        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.monolingual_vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
        with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
            for token in vocab_tokens:
                fp.write(F'''{token} {vocab_tokens[token]}\n''' )
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        tokenizer.save_pretrained(self.tmpdirname )
    def get_tokenizer( self , **kwargs ):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map )
        return BartphoTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        '''simple docstring'''
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text
    def test_full_tokenizer( self ):
        '''simple docstring'''
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB , self.monolingual_vocab_file , **self.special_tokens_map )
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
| 218 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]] ) -> list[list[float]]:
    """simple docstring"""
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse." )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError("This matrix has no inverse." )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3." )
| 218 | 1 |
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
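# Worked example (a sketch): the relation is |Z|**2 = R**2 + X**2, so with
# R = 3 and X = 4 the missing magnitude is sqrt(9 + 16) = 5.
if __name__ == "__main__":
    print(electrical_impedance(3, 4, 0))  # {'impedance': 5.0}
    print(electrical_impedance(0, 4, 5))  # {'resistance': 3.0}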
if __name__ == "__main__":
import doctest
doctest.testmod() | 711 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """encoder-decoder"""
    is_composition = True
    def __init__( self , **kwargs ):
        super().__init__(**kwargs )
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop('''encoder''' )
        encoder_model_type = encoder_config.pop('''model_type''' )
        decoder_config = kwargs.pop('''decoder''' )
        decoder_model_type = decoder_config.pop('''model_type''' )
        from ..auto.configuration_auto import AutoConfig
        self.encoder = AutoConfig.for_model(encoder_model_type , **encoder_config )
        self.decoder = AutoConfig.for_model(decoder_model_type , **decoder_config )
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs( cls , encoder_config , decoder_config , **kwargs ) -> PretrainedConfig:
        logger.info('''Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config''' )
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **kwargs )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output | 619 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER', 'False' ) ) is not True, reason='Skipping test because should only be run when releasing minor transformers version', )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_5_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'pytorch',
'script': 'run_ddp.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.7, 'eval_loss': 0.6},
},
{
'framework': 'tensorflow',
'script': 'run_tf_dist.py',
'model_name_or_path': 'distilbert-base-cased',
'instance_type': 'ml.p3.16xlarge',
'results': {'train_runtime': 6_0_0, 'eval_accuracy': 0.6, 'eval_loss': 0.7},
},
] )
class snake_case_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        if self.framework == "pytorch":
            subprocess.run(
                f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split(), encoding="utf-8", check=True, )
        assert hasattr(self, "env" )
    def create_estimator( self, instance_count ):
        job_name = f"""{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}"""
        # distributed data settings
        distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None
        # creates estimator
        return HuggingFace(
            entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", )
    def save_results_as_csv( self, job_name ):
        TrainingJobAnalytics(job_name ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
    @parameterized.expand([(2,)] )
    def test_script( self, instance_count ):
        estimator = self.create_estimator(instance_count )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds", 99_9999 )
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
        assert all(t <= self.results["eval_loss"] for t in eval_loss )
        # dump tests result into json file to share in PR
        with open(f"""{estimator.latest_training_job.name}.json""", "w" ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile )
| 625 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
'''simple docstring'''
    def __init__( self , speech_model: WhisperForConditionalGeneration , speech_processor: WhisperProcessor , vae: AutoencoderKL , text_encoder: CLIPTextModel , tokenizer: CLIPTokenizer , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker: StableDiffusionSafetyChecker , feature_extractor: CLIPImageProcessor , ):
"""simple docstring"""
super().__init__()
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' )
        self.register_modules(
            speech_model=speech_model , speech_processor=speech_processor , vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , feature_extractor=feature_extractor , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ):
        """simple docstring"""
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ):
        """simple docstring"""
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int=16_000 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 512 , UpperCAmelCase_ : int = 50 , UpperCAmelCase_ : float = 7.5 , UpperCAmelCase_ : Optional[Union[str, List[str]]] = None , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[torch.FloatTensor] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCAmelCase_ : int = 1 , **UpperCAmelCase_ : Any , ) -> Any:
"""simple docstring"""
_lowerCAmelCase = self.speech_processor.feature_extractor(
UpperCAmelCase_ , return_tensors='pt' , sampling_rate=UpperCAmelCase_ ).input_features.to(self.device )
_lowerCAmelCase = self.speech_model.generate(UpperCAmelCase_ , max_length=480_000 )
_lowerCAmelCase = self.speech_processor.tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ , normalize=UpperCAmelCase_ )[
0
]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = 1
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = len(UpperCAmelCase_ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(UpperCAmelCase_ )}.""" )
# get prompt text embeddings
_lowerCAmelCase = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCAmelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCAmelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_lowerCAmelCase = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCAmelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = text_embeddings.shape
_lowerCAmelCase = text_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_lowerCAmelCase = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCAmelCase_ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase = 42
if negative_prompt is None:
_lowerCAmelCase = [''] * batch_size
elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !="""
F""" {type(UpperCAmelCase_ )}.""" )
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
_lowerCAmelCase = [negative_prompt]
elif batch_size != len(UpperCAmelCase_ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
' the batch size of `prompt`.' )
else:
_lowerCAmelCase = negative_prompt
_lowerCAmelCase = text_input_ids.shape[-1]
_lowerCAmelCase = self.tokenizer(
UpperCAmelCase_ , padding='max_length' , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt' , )
_lowerCAmelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCAmelCase = uncond_embeddings.shape[1]
_lowerCAmelCase = uncond_embeddings.repeat(1 , UpperCAmelCase_ , 1 )
_lowerCAmelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device='cpu' , dtype=UpperCAmelCase_ ).to(
self.device )
else:
_lowerCAmelCase = torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_ )
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_lowerCAmelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(UpperCAmelCase_ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_lowerCAmelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_lowerCAmelCase = {}
if accepts_eta:
_lowerCAmelCase = eta
for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase = self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_ )
# predict the noise residual
_lowerCAmelCase = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ ).sample
# perform guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase = noise_pred.chunk(2 )
_lowerCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase = self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = 1 / 0.18215 * latents
_lowerCAmelCase = self.vae.decode(UpperCAmelCase_ ).sample
_lowerCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase = self.numpy_to_pil(UpperCAmelCase_ )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_ )
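# Standalone sketch of the classifier-free guidance arithmetic used in the
# denoising loop above, on dummy tensors (the 7.5 scale mirrors the default):
if __name__ == "__main__":
    dummy_pred = torch.randn(2, 4, 64, 64)
    noise_uncond, noise_text = dummy_pred.chunk(2)
    guided = noise_uncond + 7.5 * (noise_text - noise_uncond)
    print(guided.shape)  # torch.Size([1, 4, 64, 64])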
| 580 | 0 |
"""simple docstring"""
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True ):
"""simple docstring"""
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True ) )
class TestDatasetOnHfGcp(TestCase ):
    dataset = None
    config_name = None
    def test_dataset_info_available( self , dataset , config_name ):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset , cache_dir=tmp_dir )
            builder_cls = import_main_class(dataset_module.module_path , dataset=True )
            builder_instance: DatasetBuilder = builder_cls(
                cache_dir=tmp_dir , config_name=config_name , hash=dataset_module.hash , )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False ).replace(os.sep , "/" ),
                    config.DATASET_INFO_FILENAME,
                ] )
            dataset_info_path = cached_path(dataset_info_url , cache_dir=tmp_dir )
            self.assertTrue(os.path.exists(dataset_info_path ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory ):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_dir , config_name="20220301.frr" , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset(tmp_path ):
    dataset_module = dataset_module_factory("wikipedia" , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance: DatasetBuilder = builder_cls(
        cache_dir=tmp_path , config_name="20220301.frr" , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds["train"] , IterableDataset )
    assert next(iter(ds["train"] ) )
| 721 |
"""simple docstring"""
def one_pence() -> int:
    """simple docstring"""
    return 1
def two_pence(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ) -> int:
    """simple docstring"""
    return two_pound(x )
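# Hand-checkable cases (a sketch): with only 1p and 2p coins there are three
# ways to make 4p (2+2, 2+1+1, 1+1+1+1), and eleven ways to make 10p using
# every denomination.
assert two_pence(4) == 3
assert solution(10) == 11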
if __name__ == "__main__":
print(solution(int(input().strip())))
| 274 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ['LlamaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ['LlamaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
'LlamaForCausalLM',
'LlamaModel',
'LlamaPreTrainedModel',
'LlamaForSequenceClassification',
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 78 |
'''simple docstring'''
import re
def dna(dna: str ) -> str:
    if len(re.findall('[ATCG]' , dna ) ) != len(dna ):
        raise ValueError('Invalid Strand' )
    return dna.translate(dna.maketrans('ATCG' , 'TAGC' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
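    # Quick demonstration (a sketch): each base maps to its Watson-Crick
    # complement, so "GCTA" becomes "CGAT".
    print(dna("GCTA"))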
| 128 | 0 |
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int ) -> set:
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i )
    if n > 1:
        factors.add(n )
    return factors
@lru_cache
def upf_len(num: int ) -> int:
    return len(unique_prime_factors(num ) )
def equality(iterable: list ) -> bool:
    return len(set(iterable ) ) in (0, 1)
def run(n: int ) -> list:
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n )]
        # Run elements through out unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x ) for x in group]
        checker.append(n )
        # If all numbers in the list are equal, return the group variable.
        if equality(checker ):
            return group
        # Increment our base variable by 1
        base += 1
def solution(n: int = 4 ) -> int:
    results = run(n )
    return results[0] if len(results ) else None
if __name__ == "__main__":
print(solution())
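    # Smaller instances as a sanity check (a sketch): 14 and 15 are the first
    # consecutive pair with two distinct prime factors each (14 = 2 * 7,
    # 15 = 3 * 5), and 644 starts the first run of three.
    print(solution(2))  # 14
    print(solution(3))  # 644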
| 574 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig ):
    model_type = 'M-CLIP'
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP(PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        """simple docstring"""
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
| 574 | 1 |
'''simple docstring'''
class Node:
    def __init__( self , data: int , previous=None , next_node=None ):
        self.data = data
        self.previous = previous
        self.next = next_node
    def __str__( self ):
        return F'{self.data}'
    def get_data( self ):
        return self.data
    def get_next( self ):
        return self.next
    def get_previous( self ):
        return self.previous
class LinkedListIterator:
    def __init__( self , head ):
        self.current = head
    def __iter__( self ):
        return self
    def __next__( self ):
        if not self.current:
            raise StopIteration
        else:
            value = self.current.get_data()
            self.current = self.current.get_next()
            return value
class LinkedList:
    def __init__( self ):
        self.head = None  # First node in list
        self.tail = None  # Last node in list
    def __str__( self ):
        current = self.head
        nodes = []
        while current is not None:
            nodes.append(current.get_data() )
            current = current.get_next()
        return " ".join(str(node ) for node in nodes )
    def __contains__( self , value: int ):
        current = self.head
        while current:
            if current.get_data() == value:
                return True
            current = current.get_next()
        return False
    def __iter__( self ):
        return LinkedListIterator(self.head )
    def get_head_data( self ):
        if self.head:
            return self.head.get_data()
        return None
    def get_tail_data( self ):
        if self.tail:
            return self.tail.get_data()
        return None
    def set_head( self , node: Node ):
        if self.head is None:
            self.head = node
            self.tail = node
        else:
            self.insert_before_node(self.head , node )
    def set_tail( self , node: Node ):
        if self.head is None:
            self.set_head(node )
        else:
            self.insert_after_node(self.tail , node )
    def insert( self , value: int ):
        node = Node(value )
        if self.head is None:
            self.set_head(node )
        else:
            self.set_tail(node )
    def insert_before_node( self , node: Node , node_to_insert: Node ):
        node_to_insert.next = node
        node_to_insert.previous = node.previous
        if node.get_previous() is None:
            self.head = node_to_insert
        else:
            node.previous.next = node_to_insert
        node.previous = node_to_insert
    def insert_after_node( self , node: Node , node_to_insert: Node ):
        node_to_insert.previous = node
        node_to_insert.next = node.next
        if node.get_next() is None:
            self.tail = node_to_insert
        else:
            node.next.previous = node_to_insert
        node.next = node_to_insert
    def insert_at_position( self , position: int , value: int ):
        current_position = 1
        new_node = Node(value )
        node = self.head
        while node:
            if current_position == position:
                self.insert_before_node(node , new_node )
                return
            current_position += 1
            node = node.next
        self.insert_after_node(self.tail , new_node )
    def get_node( self , item: int ):
        node = self.head
        while node:
            if node.get_data() == item:
                return node
            node = node.get_next()
        raise Exception('Node not found' )
    def delete_value( self , value ):
        if (node := self.get_node(value )) is not None:
            if node == self.head:
                self.head = self.head.get_next()
            if node == self.tail:
                self.tail = self.tail.get_previous()
            self.remove_node_pointers(node )
    @staticmethod
    def remove_node_pointers( node: Node ):
        if node.get_next():
            node.next.previous = node.previous
        if node.get_previous():
            node.previous.next = node.next
        node.next = None
        node.previous = None
    def is_empty( self ):
        return self.head is None
def create_linked_list() -> None:
    pass
if __name__ == "__main__":
    import doctest
    doctest.testmod()
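# A short usage sketch of the doubly linked list defined above (values are
# arbitrary):
if __name__ == "__main__":
    linked_list = LinkedList()
    for value in (1, 2, 3):
        linked_list.insert(value)
    print(linked_list)  # 1 2 3
    linked_list.delete_value(2)
    print(linked_list)  # 1 3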
| 356 |
'''simple docstring'''
from math import isqrt, log2
def calculate_prime_numbers(max_number: int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base: int = 800800 , degree: int = 800800 ) -> int:
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(f'''{solution() = }''')
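    # The problem statement's smaller bound as a sanity check (a sketch; the
    # figure 10790 is quoted from the Project Euler 800 statement):
    print(solution(800, 800))  # 10790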
| 356 | 1 |
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/maskformer-swin-base-ade''': (
'''https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'''
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
logger = logging.get_logger(__name__)
class MaskFormerConfig( PretrainedConfig ):
    model_type = 'maskformer'
    attribute_map = {'hidden_size': 'mask_feature_size'}
    backbones_supported = ['resnet', 'swin']
    decoders_supported = ['detr']
    def __init__( self , fpn_feature_size = 256 , mask_feature_size = 256 , no_object_weight = 0.1 , use_auxiliary_loss = False , backbone_config = None , decoder_config = None , init_std = 0.02 , init_xavier_std = 1.0 , dice_weight = 1.0 , cross_entropy_weight = 1.0 , mask_weight = 20.0 , output_auxiliary_logits = None , **kwargs , ):
        if backbone_config is None:
            # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
            backbone_config = SwinConfig(
                image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. """
                f"""Supported model types: {','.join(self.backbones_supported )}""" )
        if decoder_config is None:
            # fall back to https://huggingface.co/facebook/detr-resnet-50
            decoder_config = DetrConfig()
        else:
            # verify that the decoder is supported
            decoder_type = (
                decoder_config.pop("model_type" ) if isinstance(decoder_config , dict ) else decoder_config.model_type
            )
            if decoder_type not in self.decoders_supported:
                raise ValueError(
                    f"""Transformer Decoder {decoder_type} not supported, please use one of"""
                    f""" {','.join(self.decoders_supported )}""" )
            if isinstance(decoder_config , dict ):
                config_class = CONFIG_MAPPING[decoder_type]
                decoder_config = config_class.from_dict(decoder_config )
        self.backbone_config = backbone_config
        self.decoder_config = decoder_config
        # main feature dimension for the model
        self.fpn_feature_size = fpn_feature_size
        self.mask_feature_size = mask_feature_size
        # initializer
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        # Hungarian matcher && loss
        self.cross_entropy_weight = cross_entropy_weight
        self.dice_weight = dice_weight
        self.mask_weight = mask_weight
        self.use_auxiliary_loss = use_auxiliary_loss
        self.no_object_weight = no_object_weight
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_attention_heads = self.decoder_config.encoder_attention_heads
        self.num_hidden_layers = self.decoder_config.num_hidden_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_and_decoder_configs( cls , backbone_config , decoder_config , **kwargs ):
        return cls(
            backbone_config=backbone_config , decoder_config=decoder_config , **kwargs , )
    def to_dict( self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["decoder_config"] = self.decoder_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 715 |
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any] ) -> None:
    '''simple docstring'''
    create_state_space_tree(sequence , [] , 0 )
def create_state_space_tree(sequence: list[Any] , current_subsequence: list[Any] , index: int ) -> None:
    '''simple docstring'''
    if index == len(sequence ):
        print(current_subsequence )
        return
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.append(sequence[index] )
    create_state_space_tree(sequence , current_subsequence , index + 1 )
    current_subsequence.pop()
if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['''A''', '''B''', '''C'''])
generate_all_subsequences(seq)
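    # Each element is either kept or skipped, so a sequence of length n prints
    # 2 ** n subsequences: 16 lines for [3, 1, 2, 4] and 8 for ["A", "B", "C"].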
| 205 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)
CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class CodeGenConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''codegen'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }
    def __init__( self , vocab_size=5_0_4_0_0 , n_ctx=2_0_4_8 , n_positions=2_0_4_8 , n_embd=4_0_9_6 , n_layer=2_8 , n_head=1_6 , rotary_dim=6_4 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0_2_5_6 , eos_token_id=5_0_2_5_6 , tie_word_embeddings=False , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )
class CodeGenOnnxConfig( OnnxConfigWithPast ):
    """simple docstring"""
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        """simple docstring"""
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: 'batch', 1: 'past_sequence + sequence'}
        else:
            common_inputs["attention_mask"] = {0: 'batch', 1: 'sequence'}
        return common_inputs
    @property
    def num_layers( self ) -> int:
        """simple docstring"""
        return self._config.n_layer
    @property
    def num_attention_heads( self ) -> int:
        """simple docstring"""
        return self._config.n_head
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        """simple docstring"""
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
                batch , seqlen = common_inputs['input_ids'].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs['attention_mask']
        if self.use_past:
            mask_dtype = ordered_inputs['attention_mask'].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs
    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 1_3 | 494 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_layoutlmv2': ['LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LayoutLMv2Config'],
'processing_layoutlmv2': ['LayoutLMv2Processor'],
'tokenization_layoutlmv2': ['LayoutLMv2Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv2_fast"] = ['LayoutLMv2TokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv2"] = ['LayoutLMv2FeatureExtractor']
    _import_structure["image_processing_layoutlmv2"] = ['LayoutLMv2ImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv2"] = [
'LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv2ForQuestionAnswering',
'LayoutLMv2ForSequenceClassification',
'LayoutLMv2ForTokenClassification',
'LayoutLMv2Layer',
'LayoutLMv2Model',
'LayoutLMv2PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor
        from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 195 | 0 |
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    query = """%20""".join(argv[1:]) if len(argv) > 1 else quote(str(input("""Search: """)))
    print("""Googling.....""")
    url = F"""https://www.google.com/search?q={query}&num=100"""
    res = requests.get(
        url,
        headers={"""User-Agent""": str(UserAgent().random)},
    )
    try:
        link = (
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """yuRUbf"""})
            .find("""a""")
            .get("""href""")
        )
    except AttributeError:
        link = parse_qs(
            BeautifulSoup(res.text, """html.parser""")
            .find("""div""", attrs={"""class""": """kCrYT"""})
            .find("""a""")
            .get("""href""")
        )["""url"""][0]
    webbrowser.open(link)
| 20 |
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
A__ : Any= """src/diffusers"""
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
A__ : Optional[Any]= """
{0} = None
"""
A__ : List[Any]= """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
A__ : Dict= """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None

    return "_and_".join(backends)
def read_init():
    """Read the init and extract backend-specific objects."""
    with open(os.path.join(PATH_TO_DIFFUSERS, '__init__.py'), 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith('else:'):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects
def create_dummy_object(name, backend_name):
    """Create the code for the dummy object corresponding to `name`."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)
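# As an illustration (hypothetical object name): create_dummy_object("UNet2DModel", '["torch"]')
# renders the DUMMY_CLASS template, i.e. a stub class whose constructor calls
# requires_backends(self, ["torch"]) and therefore raises a helpful error when
# torch is missing; all-uppercase names get DUMMY_CONSTANT, lowercase names DUMMY_FUNCTION.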
def create_dummy_files(backend_specific_objects=None):
    """Create the content of the dummy files."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = '[' + ', '.join(F'"{b}"' for b in backend.split('_and_')) + ']'
        dummy_file = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files
def check_dummies(overwrite=False):
    """Check if the dummy files are up to date and maybe `overwrite` with the right content."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {'torch': 'pt'}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, 'utils')
    dummy_file_paths = {
        backend: os.path.join(path, F'dummy_{short_names.get(backend, backend)}_objects.py')
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, 'r', encoding='utf-8', newline='\n') as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ''

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    F'Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main '
                    '__init__ has new objects.')
                with open(dummy_file_paths[backend], 'w', encoding='utf-8', newline='\n') as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    'The main __init__ has objects that are not present in '
                    F'diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` '
                    'to fix this.')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
    args = parser.parse_args()
check_dummies(args.fix_and_overwrite)
| 20 | 1 |
def gray_code(bit_count):
    """
    Takes in an integer n and returns the n-bit Gray code sequence as integers.
    """
    if bit_count < 0:
        raise ValueError('The given input must be non-negative')

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)
    #
    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count):
    """
    Will output the n-bit Gray sequence as a list of binary strings.
    """
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
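# Quick illustration (not part of the original file): gray_code(2) -> [0, 1, 3, 2];
# consecutive entries differ in exactly one bit, which is the defining Gray-code property.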
| 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFRoFormerModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size])

    def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ):
        """simple docstring"""
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True

        return False

    def setUp( self ):
        """simple docstring"""
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config( self ):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_causal_lm( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm( self ):
        """simple docstring"""
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.12053341, -1.0264901, 0.29221946],
[-1.5133783, 0.197433, 0.15190607],
[-5.0135403, -3.900256, -0.84038764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_basic( self ):
        """simple docstring"""
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]])

        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)
    def test_positional_emb_weights_against_roformer( self ):
        """simple docstring"""
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])
        weights = emb1.weight[:3, :5]

        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        """simple docstring"""
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer)

        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key_layer, atol=self.tolerance)
| 0 | 1 |
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        # Shift every channel value by `level`; PIL clamps the result to [0, 255].
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open('image_data/lena.jpg') as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save('image_data/lena_brightness.png', format='png')
| 415 |
'''simple docstring'''
def sum_of_digits(n: int) -> int:
    '''Iterative digit sum.'''
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    '''Recursive digit sum.'''
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    '''Digit sum via string conversion.'''
    return sum(int(c) for c in str(abs(n)))
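# Worked example (illustrative): all three variants agree, e.g.
# sum_of_digits(9045) == sum_of_digits_recursion(9045) == sum_of_digits_compact(9045) == 18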
def benchmark() -> None:
    '''Benchmark the three digit-sum implementations.'''
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 415 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    """Collect the rows of each partition, in the requested partition order."""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'SPARK_PARTITION_ID() = {part_id}').collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == F'0_{i}'
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 71 |
from __future__ import annotations
def ceil_index(v, l, r, key):  # noqa: E741
    while r - l > 1:
        m = (l + r) // 2
        if v[m] >= key:
            r = m
        else:
            l = m  # noqa: E741
    return r


def longest_increasing_subsequence_length(v: list[int]) -> int:
    if len(v) == 0:
        return 0

    tail = [0] * len(v)
    length = 1

    tail[0] = v[0]

    for i in range(1, len(v)):
        if v[i] < tail[0]:
            # new smallest value
            tail[0] = v[i]
        elif v[i] > tail[length - 1]:
            # v[i] extends the largest subsequence
            tail[length] = v[i]
            length += 1
        else:
            # v[i] replaces the ceiling element in tail
            tail[ceil_index(tail, -1, length - 1, v[i])] = v[i]

    return length
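# Worked example (illustrative): longest_increasing_subsequence_length([2, 5, 3, 7, 11, 8, 10, 13, 6])
# returns 6, e.g. via the subsequence [2, 3, 7, 8, 10, 13]; the binary search makes this O(n log n).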
if __name__ == "__main__":
import doctest
doctest.testmod()
| 521 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/bart-base': 1024,
'facebook/bart-large': 1024,
'facebook/bart-large-mnli': 1024,
'facebook/bart-large-cnn': 1024,
'facebook/bart-large-xsum': 1024,
'yjernite/bart_eli5': 1024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings for BPE."""
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
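# Illustration: printable bytes map to themselves (bytes_to_unicode()[ord("A")] == "A"),
# while non-printable bytes such as 0x00 map to printable stand-ins, so the BPE
# vocabulary never has to contain raw control or whitespace bytes.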
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
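# Example: get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}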
class BartTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> List[Any]:
lowercase__ : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
lowercase__ : int = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
lowercase__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
lowercase__ : str = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
lowercase__ : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
lowercase__ : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase__ : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding="""utf-8""" ) as vocab_handle:
lowercase__ : Optional[int] = json.load(lowerCamelCase__ )
lowercase__ : List[Any] = {v: k for k, v in self.encoder.items()}
lowercase__ : Optional[Any] = errors # how to handle errors in decoding
lowercase__ : Dict = bytes_to_unicode()
lowercase__ : List[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding="""utf-8""" ) as merges_handle:
lowercase__ : Optional[Any] = merges_handle.read().split("""\n""" )[1:-1]
lowercase__ : Union[str, Any] = [tuple(merge.split() ) for merge in bpe_merges]
lowercase__ : List[str] = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
lowercase__ : Dict = {}
lowercase__ : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase__ : int = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
    def vocab_size( self ):
        return len(self.encoder)

    def get_vocab( self ):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe( self, token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize( self, text ):
        """Tokenize a string."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8"))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id( self, token ):
        """Converts a token (str) into an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token( self, index ):
        """Converts an index (integer) into a token (str) using the vocab."""
        return self.decoder.get(index)

    def convert_tokens_to_string( self, tokens ):
        """Converts a sequence of tokens (string) into a single string."""
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self, token_ids_0, token_ids_1 = None, already_has_special_tokens = False ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization( self, text, is_split_into_words=False, **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 715 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )

logger.info('Training/evaluation parameters %s', args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name], examples[context_column_name if pad_on_right else question_column_name], truncation="only_second" if pad_on_right else "only_first", max_length=max_seq_length, stride=args.doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples, features=features, predictions=predictions, version_2_with_negative=args.version_2_with_negative, n_best_size=args.n_best_size, max_answer_length=args.max_answer_length, null_score_diff_threshold=args.null_score_diff_threshold, output_dir=args.output_dir, prefix=stage, )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(F" Num examples = {len(eval_dataset)}")
logger.info(F" Batch size = {args.per_device_eval_batch_size}")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F"Evaluation metrics: {eval_metric}") | 128 | 0 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_snake_case = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
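# Example invocation (the paths are placeholders; the three flags are the ones
# defined by the parser above):
#
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/albert/model.ckpt-best \
#       --albert_config_file /path/to/albert/albert_config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin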
| 389 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
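# Beyond the unit tests above, a minimal sketch of real-world CLIPProcessor
# usage. The checkpoint name is the standard public one; treat the snippet as
# illustrative rather than part of the test suite:
#
#   from PIL import Image
#   from transformers import CLIPModel, CLIPProcessor
#
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
#   image = Image.open("cat.jpg")  # placeholder image path
#   inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True)
#   outputs = model(**inputs)
#   probs = outputs.logits_per_image.softmax(dim=1)  # image-text similarity scores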
| 176 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2048,
}

# Unicode defines 1,114,112 total "codepoints"
UNICODE_VOCAB_SIZE = 1114112

# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xE000
SEP = 0xE001
BOS = 0xE002
MASK = 0xE003
RESERVED = 0xE004

# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
    # Special symbols are represented using codepoints values that are valid,
    # but designated as "Private Use", meaning that they will never be assigned
    # characters by the Unicode Consortium, and are thus safe for use here.
    #
    # NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
    # excluded and should fail with a hard error.
    CLS: "[CLS]",
    SEP: "[SEP]",
    BOS: "[BOS]",
    MASK: "[MASK]",
    PAD: "[PAD]",
    RESERVED: "[RESERVED]",
}

# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME: Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class CanineTokenizer(PreTrainedTokenizer):
    r"""
    Construct a CANINE tokenizer (i.e. a character splitter). Converts text to a sequence of characters,
    then each character to its Unicode codepoint.
    """

    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        bos_token=chr(CLS),
        eos_token=chr(SEP),
        sep_token=chr(SEP),
        cls_token=chr(CLS),
        pad_token=chr(PAD),
        mask_token=chr(MASK),
        add_prefix_space=False,
        model_max_length=2048,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            model_max_length=model_max_length,
            **kwargs,
        )

        # Creates a mapping for looking up the IDs of special symbols.
        self._special_codepoints: Dict[str, int] = {}
        for codepoint, name in SPECIAL_CODEPOINTS.items():
            self._special_codepoints[name] = codepoint

        # Creates a mapping for looking up the string forms of special symbol IDs.
        self._special_codepoint_strings: Dict[int, str] = {
            codepoint: name for name, codepoint in self._special_codepoints.items()
        }

        self._unicode_vocab_size = UNICODE_VOCAB_SIZE
        self._num_special_tokens = len(self._special_codepoints)

    @property
    def vocab_size(self) -> int:
        return self._unicode_vocab_size

    def _tokenize(self, text: str) -> List[str]:
        return list(text)

    def _convert_token_to_id(self, token: str) -> int:
        try:
            return ord(token)
        except TypeError:
            raise ValueError(f"invalid token: '{token}'")

    def _convert_id_to_token(self, index: int) -> str:
        try:
            if index in SPECIAL_CODEPOINTS:
                return SPECIAL_CODEPOINTS[index]
            return chr(index)
        except TypeError:
            raise ValueError(f"invalid id: {index}")

    def convert_tokens_to_string(self, tokens):
        return "".join(tokens)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = cls + token_ids_0 + sep
        if token_ids_1 is not None:
            result += token_ids_1 + sep
        return result

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        result = [1] + ([0] * len(token_ids_0)) + [1]
        if token_ids_1 is not None:
            result += ([0] * len(token_ids_1)) + [1]
        return result

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        result = len(cls + token_ids_0 + sep) * [0]
        if token_ids_1 is not None:
            result += len(token_ids_1 + sep) * [1]
        return result

    # CanineTokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        return ()
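# A short usage sketch for this character-level tokenizer. "google/canine-s"
# is the public checkpoint; the snippet is illustrative only:
#
#   from transformers import CanineTokenizer
#
#   tokenizer = CanineTokenizer.from_pretrained("google/canine-s")
#   encoding = tokenizer("Life is like a box of chocolates.", return_tensors="pt")
#   # Every character becomes one token (its Unicode codepoint), and the
#   # sequence is wrapped in the [CLS]/[SEP] private-use pseudo-characters.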
| 682 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer(PreTrainedTokenizer):
    r"""
    Construct an M2M100 tokenizer, based on SentencePiece.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        src_lang=None,
        tgt_lang=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        language_codes="m2m100",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        num_madeup_words=8,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            unk_token=unk_token,
            pad_token=pad_token,
            language_codes=language_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            num_madeup_words=num_madeup_words,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        self.encoder_size = len(self.encoder)

        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}

        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)

        self.num_madeup_words = num_madeup_words

    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) into a token (str) using the decoder."""
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        save_dir = Path(save_directory)
        if not save_dir.is_dir():
            raise OSError(f"{save_directory} should be a directory")
        vocab_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
        )
        spm_save_path = save_dir / (
            (filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
        )

        save_json(self.encoder, vocab_save_path)

        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (str(vocab_save_path), str(spm_save_path))

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self.src_lang)
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _build_translation_inputs(self, raw_inputs, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, **extra_kwargs)
        tgt_lang_id = self.get_lang_id(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def _switch_to_input_mode(self):
        self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(src_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target lang setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        lang_token = self.get_lang_token(tgt_lang)
        self.cur_lang_id = self.lang_token_to_id[lang_token]
        self.prefix_tokens = [self.cur_lang_id]
        self.suffix_tokens = [self.eos_token_id]

    def get_lang_token(self, lang: str) -> str:
        return self.lang_code_to_token[lang]

    def get_lang_id(self, lang: str) -> int:
        lang_token = self.get_lang_token(lang)
        return self.lang_token_to_id[lang_token]


def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm


def load_json(path: str):
    with open(path, "r") as f:
        return json.load(f)


def save_json(data, path: str) -> None:
    with open(path, "w") as f:
        json.dump(data, f, indent=2)
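# A minimal translation sketch with this tokenizer, mirroring the documented
# M2M100 usage ("facebook/m2m100_418M" is the public checkpoint):
#
#   from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#
#   tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
#   model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#   inputs = tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
#   generated = model.generate(**inputs, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   print(tokenizer.batch_decode(generated, skip_special_tokens=True))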
| 682 | 1 |
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of ``n`` in non-decreasing order, using trial division."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
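# Example outputs (doctest-style; prime_factors is defined above):
#   >>> prime_factors(315)
#   [3, 3, 5, 7]
#   >>> prime_factors(97)  # a prime number is its own only factor
#   [97]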
| 384 |
"""Close or un-stale GitHub issues for the huggingface/diffusers repository."""
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
| 384 | 1 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text to text generation using seq2seq models.
    """

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            postprocess_params["return_type"] = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records


@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """
    Summarize news articles and other documents.
    """

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """Checks whether there might be something wrong with given input with regard to the model."""
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """
    Translates from one language to another.
    """

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
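# Usage sketches for the three pipelines defined above, using the standard
# pipeline() factory (default models are resolved and downloaded on first use):
#
#   from transformers import pipeline
#
#   generator = pipeline("text2text-generation")
#   generator("question: What is 42 ? context: 42 is the answer to life")
#
#   summarizer = pipeline("summarization")
#   summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
#
#   translator = pipeline("translation_en_to_fr")
#   translator("This is a sentence in English")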
| 562 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
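# Example invocation (prompt and output directory are placeholders; the flags
# are the ones defined above):
#
#   python retrieve.py \
#       --class_prompt "photo of a dog" \
#       --class_data_dir ./class_images/dog \
#       --num_class_images 200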
| 562 | 1 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """
__UpperCamelCase : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCamelCase : bool = field(
default=__magic_name__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=__magic_name__ , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
__UpperCamelCase : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCamelCase : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
__UpperCamelCase : Optional[int] = field(
default=__magic_name__ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
} , )
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """
__UpperCamelCase : str = field(
default=__magic_name__ , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : str = field(
default=__magic_name__ , metadata={'help': 'Evaluation language. Also train language if `train_language` is set to None.'} )
__UpperCamelCase : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Train language if it is different from the evaluation language.'} )
__UpperCamelCase : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCamelCase : Optional[bool] = field(
default=__magic_name__ , metadata={'help': 'arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'} , )
__UpperCamelCase : bool = field(
default=__magic_name__ , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__UpperCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCamelCase : bool = field(
default=__magic_name__ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
__UpperCamelCase : bool = field(
default=__magic_name__ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_xnli" , lowercase__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase__ : Optional[int] = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(F"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
lowerCAmelCase__ : str = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ : List[Any] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
lowerCAmelCase__ : Any = load_dataset(
"xnli" , model_args.language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase__ : Optional[Any] = load_dataset(
"xnli" , model_args.train_language , split="train" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Optional[int] = train_dataset.features["label"].names
if training_args.do_eval:
lowerCAmelCase__ : Tuple = load_dataset(
"xnli" , model_args.language , split="validation" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Dict = eval_dataset.features["label"].names
if training_args.do_predict:
lowerCAmelCase__ : Any = load_dataset(
"xnli" , model_args.language , split="test" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Union[str, Any] = predict_dataset.features["label"].names
# Labels
lowerCAmelCase__ : Optional[Any] = len(lowercase__ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ : Tuple = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , idalabel={str(lowercase__ ): label for i, label in enumerate(lowercase__ )} , labelaid={label: i for i, label in enumerate(lowercase__ )} , finetuning_task="xnli" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase__ : List[str] = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ : List[str] = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ : List[Any] = False
def preprocess_function(lowercase__ ):
# Tokenize the texts
return tokenizer(
examples["premise"] , examples["hypothesis"] , padding=lowercase__ , max_length=data_args.max_seq_length , truncation=lowercase__ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
lowerCAmelCase__ : Optional[Any] = min(len(lowercase__ ) , data_args.max_train_samples )
lowerCAmelCase__ : Optional[int] = train_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
lowerCAmelCase__ : int = train_dataset.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on train dataset" , )
# Log a few random samples from the training set:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(F"""Sample {index} of the training set: {train_dataset[index]}.""" )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
lowerCAmelCase__ : Optional[int] = min(len(lowercase__ ) , data_args.max_eval_samples )
lowerCAmelCase__ : Union[str, Any] = eval_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
lowerCAmelCase__ : List[str] = eval_dataset.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on validation dataset" , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
lowerCAmelCase__ : str = min(len(lowercase__ ) , data_args.max_predict_samples )
lowerCAmelCase__ : Union[str, Any] = predict_dataset.select(range(lowercase__ ) )
with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
lowerCAmelCase__ : Dict = predict_dataset.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc="Running tokenizer on prediction dataset" , )
# Get the metric function
lowerCAmelCase__ : int = evaluate.load("xnli" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ ):
lowerCAmelCase__ : Union[str, Any] = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase__ : Union[str, Any] = np.argmax(lowercase__ , axis=1 )
return metric.compute(predictions=lowercase__ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ : Tuple = default_data_collator
    elif training_args.fp16:
lowerCAmelCase__ : Tuple = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ : List[Any] = None
# Initialize our Trainer
lowerCAmelCase__ : int = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ : str = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ : Optional[Any] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ : List[str] = last_checkpoint
lowerCAmelCase__ : Optional[Any] = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase__ : str = train_result.metrics
lowerCAmelCase__ : Union[str, Any] = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ : List[Any] = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train" , lowercase__ )
trainer.save_metrics("train" , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
lowerCAmelCase__ : str = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase__ : Tuple = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase__ : Any = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics("eval" , lowercase__ )
trainer.save_metrics("eval" , lowercase__ )
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***" )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = trainer.predict(lowercase__ , metric_key_prefix="predict" )
lowerCAmelCase__ : List[str] = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ : str = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics("predict" , lowercase__ )
trainer.save_metrics("predict" , lowercase__ )
lowerCAmelCase__ : Union[str, Any] = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase__ : List[str] = os.path.join(training_args.output_dir , "predictions.txt" )
if trainer.is_world_process_zero():
with open(lowercase__ , "w" ) as writer:
writer.write("index\tprediction\n" )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase__ : List[Any] = label_list[item]
writer.write(F"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
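# Example invocation, adapted from the XNLI example documentation (treat the
# exact hyper-parameters as placeholders):
#
#   python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de \
#       --train_language en \
#       --do_train \
#       --do_eval \
#       --per_device_train_batch_size 32 \
#       --learning_rate 5e-5 \
#       --num_train_epochs 2.0 \
#       --max_seq_length 128 \
#       --output_dir /tmp/debug_xnli/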
| 453 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a list of lists of floats."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        spectrogram_length=2_048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44_100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE_ ( self , snake_case=False , snake_case=False ):
"""simple docstring"""
def _flatten(snake_case ):
return list(itertools.chain(*snake_case ) )
if equal_length:
lowerCAmelCase__ : Dict = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowerCAmelCase__ : Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ : Dict = [np.asarray(snake_case ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(snake_case , "spectrogram_length" ) )
self.assertTrue(hasattr(snake_case , "feature_size" ) )
self.assertTrue(hasattr(snake_case , "num_audio_channels" ) )
self.assertTrue(hasattr(snake_case , "hop_length" ) )
self.assertTrue(hasattr(snake_case , "chunk_length" ) )
self.assertTrue(hasattr(snake_case , "sampling_rate" ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Union[str, Any] = feat_extract_first.save_pretrained(snake_case )[0]
check_json_file_has_correct_format(snake_case )
lowerCAmelCase__ : Tuple = self.feature_extraction_class.from_pretrained(snake_case )
lowerCAmelCase__ : Union[str, Any] = feat_extract_first.to_dict()
lowerCAmelCase__ : int = feat_extract_second.to_dict()
lowerCAmelCase__ : Union[str, Any] = dict_first.pop("mel_filters" )
lowerCAmelCase__ : List[Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : List[Any] = os.path.join(snake_case , "feat_extract.json" )
feat_extract_first.to_json_file(snake_case )
lowerCAmelCase__ : List[Any] = self.feature_extraction_class.from_json_file(snake_case )
lowerCAmelCase__ : Optional[Any] = feat_extract_first.to_dict()
lowerCAmelCase__ : Dict = feat_extract_second.to_dict()
lowerCAmelCase__ : Tuple = dict_first.pop("mel_filters" )
lowerCAmelCase__ : Dict = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(snake_case , snake_case ) )
self.assertEqual(snake_case , snake_case )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ : Union[str, Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : Optional[int] = [np.asarray(snake_case ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase__ : Tuple = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowerCAmelCase__ : Optional[int] = feature_extractor(snake_case , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowerCAmelCase__ : List[str] = feature_extractor(
snake_case , return_tensors="np" , sampling_rate=44_100 , mask_audio=snake_case ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase__ : Tuple = np.asarray(snake_case )
lowerCAmelCase__ : List[Any] = feature_extractor(snake_case , return_tensors="np" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEqual(audio_values.shape, (1, 1, 192, 128))

        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
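# A stand-alone sketch of the feature-extractor call exercised by the
# integration test above (the audio array here is random placeholder data):
#
#   import numpy as np
#   from transformers import TvltFeatureExtractor
#
#   feature_extractor = TvltFeatureExtractor()
#   audio = np.random.randn(44_100)  # one second of fake audio at 44.1 kHz
#   audio_values = feature_extractor(audio, sampling_rate=44_100, return_tensors="pt").audio_values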
| 453 | 1 |
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'''files''' , [
['''full:README.md''', '''dataset_infos.json'''],
['''empty:README.md''', '''dataset_infos.json'''],
['''dataset_infos.json'''],
['''full:README.md'''],
] , )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n  dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
assert dataset_infos
assert dataset_infos["default"].dataset_size == 4_2
@pytest.mark.parametrize(
    "dataset_info",
    [
        DatasetInfo(),
        DatasetInfo(
            description="foo",
            features=Features({"a": Value("int32")}),
            builder_name="builder",
            config_name="config",
            version="1.0.0",
            splits=[{"name": "train"}],
            download_size=42,
        ),
    ],
)
def test_dataset_info_dump_and_reload(tmp_path, dataset_info: DatasetInfo):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))


def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    "dataset_infos_dict",
    [
        DatasetInfosDict(),
        DatasetInfosDict({"default": DatasetInfo()}),
        DatasetInfosDict({"my_config_name": DatasetInfo()}),
        DatasetInfosDict(
            {
                "default": DatasetInfo(
                    description="foo",
                    features=Features({"a": Value("int32")}),
                    builder_name="builder",
                    config_name="config",
                    version="1.0.0",
                    splits=[{"name": "train"}],
                    download_size=42,
                )
            }
        ),
        DatasetInfosDict(
            {
                "v1": DatasetInfo(dataset_size=42),
                "v2": DatasetInfo(dataset_size=1337),
            }
        ),
    ],
)
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict: DatasetInfosDict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
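# A minimal standalone sketch of the round-trip exercised above (the directory
# comes from tempfile, so the example is self-cleaning):
if __name__ == "__main__":
    import tempfile

    with tempfile.TemporaryDirectory() as tmp_dir:
        DatasetInfosDict({"default": DatasetInfo(dataset_size=42)}).write_to_directory(tmp_dir)
        reloaded = DatasetInfosDict.from_directory(tmp_dir)
        assert reloaded["default"].dataset_size == 42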
| 703 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)
        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
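# A minimal sketch of why "lower" tokenizes to ["low", "er</w>"] above: BPE greedily
# applies the learned merges to the character sequence, best (earliest-learned)
# pair first. This toy loop assumes merge priority equals the order in the merges
# file and ignores the frequency column ("123", "1456", ...).
def toy_bpe(word, merges):
    symbols = list(word[:-1]) + [word[-1] + "</w>"]
    ranks = {tuple(m.split()[:2]): i for i, m in enumerate(merges) if m}
    while True:
        pairs = [(ranks.get(pair, float("inf")), i) for i, pair in enumerate(zip(symbols, symbols[1:]))]
        best_rank, i = min(pairs, default=(float("inf"), -1))
        if best_rank == float("inf"):
            return symbols
        symbols = symbols[:i] + [symbols[i] + symbols[i + 1]] + symbols[i + 2 :]


assert toy_bpe("lower", ["l o 123", "lo w 1456", "e r</w> 1789"]) == ["low", "er</w>"]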
| 216 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_mae"] = [
        "VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMAEForPreTraining",
        "ViTMAELayer",
        "ViTMAEModel",
        "ViTMAEPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
        "TFViTMAEForPreTraining",
        "TFViTMAEModel",
        "TFViTMAEPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_mae import (
            VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMAEForPreTraining,
            ViTMAELayer,
            ViTMAEModel,
            ViTMAEPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
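# A minimal sketch of what _LazyModule does (simplified; the real implementation
# lives in transformers.utils): subclass ModuleType and import a submodule only
# when one of its exported names is first accessed.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._structure = import_structure

    def __getattr__(self, attr):
        for module_name, exports in self._structure.items():
            if attr in exports:
                module = importlib.import_module("." + module_name, self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")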
| 351 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'''
),
'''squeezebert/squeezebert-mnli''': '''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt''',
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''squeezebert/squeezebert-uncased''': (
'''https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'''
),
'''squeezebert/squeezebert-mnli-headless''': (
'''https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''squeezebert/squeezebert-uncased''': 512,
'''squeezebert/squeezebert-mnli''': 512,
'''squeezebert/squeezebert-mnli-headless''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''squeezebert/squeezebert-uncased''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli''': {'''do_lower_case''': True},
'''squeezebert/squeezebert-mnli-headless''': {'''do_lower_case''': True},
}
class SqueezeBertTokenizerFast(PreTrainedTokenizerFast):
    """Construct a "fast" SqueezeBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
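# A minimal sketch of the sequence-pair layout the two methods above produce
# (token values are illustrative; real IDs come from the vocab file):
#
#     tokens:          [CLS] A1 A2 [SEP] B1 B2 [SEP]
#     token_type_ids:    0   0  0    0   1  1    1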
| 138 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(
        self,
        vocab_size=30_000,
        embedding_size=128,
        hidden_size=4_096,
        num_hidden_layers=12,
        num_hidden_groups=1,
        num_attention_heads=64,
        intermediate_size=16_384,
        inner_group_num=1,
        hidden_act="gelu_new",
        hidden_dropout_prob=0,
        attention_probs_dropout_prob=0,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        classifier_dropout_prob=0.1,
        position_embedding_type="absolute",
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=3,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type


class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
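# A quick sketch tying the two classes together (the "default" ONNX export task
# is assumed here):
if __name__ == "__main__":
    config = AlbertConfig()
    onnx_config = AlbertOnnxConfig(config, task="default")
    print(onnx_config.inputs)
    # OrderedDict([('input_ids', {0: 'batch', 1: 'sequence'}), ...])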
| 714 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
    import os

    import jax.numpy as jnp
    from jax import jit
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model

    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8

if is_torch_available():
    import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
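# A quick sanity check of the two helpers above (shape and vocab size are
# illustrative):
if __name__ == "__main__":
    ids = ids_tensor((2, 5), vocab_size=10)
    mask = random_attention_mask((2, 5))
    assert ids.shape == (2, 5) and int(ids.max()) < 10
    assert bool(mask[:, -1].all())  # the last position is always attended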
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]

        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length

    @is_pt_flax_cross_test
    def test_greedy_generate_pt_fx(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.decoder_start_token_id = 0

        for model_class in self.all_generative_model_classes:
            flax_model = model_class(config)

            pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)
            pt_model = pt_model_class(config).eval()
            pt_model = load_flax_weights_in_pytorch_model(pt_model, flax_model.params)

            flax_generation_outputs = flax_model.generate(input_ids).sequences
            pt_generation_outputs = pt_model.generate(torch.tensor(input_ids, dtype=torch.long))

            if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
                flax_generation_outputs = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]

            self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist())
    def test_greedy_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_num_return_sequences(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = False
        config.max_length = max_length
        config.num_beams = 2
        config.num_return_sequences = 2

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences)

    def test_sample_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.do_sample = True
        config.max_length = max_length
        config.temperature = 0.8
        config.top_k = 10
        config.top_p = 0.3
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_logits_warper(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()
        config.max_length = max_length
        config.num_beams = 2
        config.min_length = 1
        config.forced_bos_token_id = 8
        config.forced_eos_token_id = 9

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_greedy_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = False
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_sample_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.do_sample = True
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())

    def test_beam_search_generate_attn_mask(self):
        config, input_ids, attention_mask, max_length = self._get_input_ids_and_config()

        # pad attention mask on the left
        attention_mask = attention_mask.at[(0, 0)].set(0)

        config.num_beams = 2
        config.max_length = max_length

        for model_class in self.all_generative_model_classes:
            model = model_class(config)

            generation_outputs = model.generate(input_ids, attention_mask=attention_mask).sequences
            self.assertEqual(generation_outputs.shape[-1], max_length)

            jit_generate = jit(model.generate)
            jit_generation_outputs = jit_generate(input_ids, attention_mask=attention_mask).sequences
            self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist())


@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
| 537 | 0 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'encoder.layer_norm_for_extract': 'layer_norm_for_extract',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'label_embs_concat': 'label_embeddings_concat',
'mask_emb': 'masked_spec_embed',
'spk_proj': 'speaker_proj',
}
TOP_LEVEL_KEYS = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'label_embeddings_concat',
'speaker_proj',
'layer_norm_for_extract',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
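# A minimal sketch of the "*" wildcard renaming performed in recursively_load_weights
# above: the layer index parsed from the fairseq parameter name is substituted into
# the HF key template (the example name below is illustrative).
def _demo_wildcard_map(name):
    key, template = "self_attn.k_proj", "encoder.layers.*.attention.k_proj"
    layer_index = name.split(key)[0].split(".")[-2]
    return "unispeech_sat." + template.replace("*", layer_index)


assert _demo_wildcard_map("encoder.layers.7.self_attn.k_proj.weight") == (
    "unispeech_sat.encoder.layers.7.attention.k_proj"
)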
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_unispeech_sat_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
 | 557 |
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'snap-research/efficientformer-l1-300': (
'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'
),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
 | 557 | 1 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for i in range(1, required_sum + 1):
        subset[0][i] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
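# A quick illustration (values are arbitrary): 9 is reachable as 4 + 5, while no
# subset of the list sums to 30.
if __name__ == "__main__":
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)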
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 50 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
    import torch

    print("Torch version:", torch.__version__)
    print("Cuda available:", torch.cuda.is_available())
    print("Cuda version:", torch.version.cuda)
    print("CuDNN version:", torch.backends.cudnn.version())
    print("Number of GPUs available:", torch.cuda.device_count())
    print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
    print("Torch version:", None)

try:
    import deepspeed

    print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
    print("DeepSpeed version:", None)

try:
    import tensorflow as tf

    print("TensorFlow version:", tf.__version__)
    print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
    print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
    print("TensorFlow version:", None)
| 50 | 1 |
'''simple docstring'''
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm, which finds the Longest palindromic Substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string
if __name__ == "__main__":
    import doctest

    doctest.testmod()
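# A quick check (illustrative input): the longest palindromic substring of
# "abbbaba" is "abbba", and a string that is already a palindrome comes back whole.
if __name__ == "__main__":
    assert palindromic_string("abbbaba") == "abbba"
    assert palindromic_string("ababa") == "ababa"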
 | 107 |
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_CITATION = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
_DESCRIPTION = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
_KWARGS_DESCRIPTION = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
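# A minimal sketch of the formula described in _DESCRIPTION above, for a single
# hypothesis/reference pair with 1-grams only (toy data; the real metric pools
# n-gram counts over the whole corpus and over n = min_len..max_len):
from collections import Counter


def toy_gleu_1gram(hyp, ref):
    matches = sum((Counter(hyp) & Counter(ref)).values())
    precision = matches / len(hyp)
    recall = matches / len(ref)
    return min(precision, recall)


assert toy_gleu_1gram(["the", "cat", "sat"], ["the", "cat", "slept"]) == 2 / 3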
| 107 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/unispeech-large-1500h-cv": (
"https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json"
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UniSpeechConfig(PretrainedConfig):
    model_type = "unispeech"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        num_ctc_classes=80,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        replace_prob=0.5,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # pretraining loss
        self.replace_prob = replace_prob

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
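# A quick illustration of inputs_to_logits_ratio: with the default conv_stride
# (5, 2, 2, 2, 2, 2, 2) the feature encoder downsamples by 5 * 2**6 = 320, i.e.
# one output frame per 320 input samples (20 ms of 16 kHz audio).
if __name__ == "__main__":
    config = UniSpeechConfig()
    assert config.inputs_to_logits_ratio == 320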
| 718 |
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and stack[-1] != "(" and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[
        ::-1
    ]  # call infix_2_postfix on Infix, return reverse of Postfix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
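# A worked example (table printout omitted): with the "(" guard in the pop loop
# above, the expression a+b*(c^d-e) converts as
#     infix_2_postfix("a+b*(c^d-e)")  ->  "abcd^e-*+"
#     infix_2_prefix("a+b*(c^d-e)")   ->  "+a*b-^cde"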
| 367 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
FILE_CONTENT = """\
Text data.
Second line of data."""

FILE_PATH = "file"
@pytest.fixture(scope="session")
def zstd_path(tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / (FILE_PATH + ".zstd")
    data = bytes(FILE_CONTENT, "utf-8")
    with zstd.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture
def tmpfs_file(tmpfs):
    with open(os.path.join(tmpfs.local_root_dir, FILE_PATH), "w") as f:
        f.write(FILE_CONTENT)
    return FILE_PATH
@pytest.mark.parametrize("compression_format" , ["gzip", "xz", "zstd"] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
SCREAMING_SNAKE_CASE : int = input_paths[compression_format]
SCREAMING_SNAKE_CASE : Union[str, Any] = tmp_path / "cache"
SCREAMING_SNAKE_CASE : int = DownloadConfig(cache_dir=lowercase , extract_compressed_file=lowercase )
SCREAMING_SNAKE_CASE : Optional[int] = cached_path(lowercase , download_config=lowercase )
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE : Dict = f.read()
with open(lowercase ) as f:
SCREAMING_SNAKE_CASE : List[Any] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted" , [True, False] )
@pytest.mark.parametrize("default_cache_dir" , [True, False] )
def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = "custom_cache"
SCREAMING_SNAKE_CASE : str = "custom_extracted_dir"
SCREAMING_SNAKE_CASE : str = tmp_path / "custom_extracted_path"
if default_extracted:
SCREAMING_SNAKE_CASE : str = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
else:
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR" , lowercase )
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowercase ) )
SCREAMING_SNAKE_CASE : str = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
SCREAMING_SNAKE_CASE : List[str] = xz_file
SCREAMING_SNAKE_CASE : int = (
DownloadConfig(extract_compressed_file=lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=lowercase )
)
SCREAMING_SNAKE_CASE : Any = cached_path(lowercase , download_config=lowercase )
assert Path(lowercase ).parent.parts[-2:] == expected
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = str(Path(lowercase ).resolve() )
assert cached_path(lowercase ) == text_file
# relative path
SCREAMING_SNAKE_CASE : Optional[Any] = str(Path(lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(lowercase ) == text_file
def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowercase )
def lowerCamelCase__ ( ):
"""simple docstring"""
with pytest.raises(lowercase ):
cached_path("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowercase ):
http_get("https://huggingface.co" , temp_file=lowercase )
with pytest.raises(lowercase ):
http_head("https://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowercase ):
ftp_get("ftp://huggingface.co" , temp_file=lowercase )
with pytest.raises(lowercase ):
ftp_head("ftp://huggingface.co" )
@patch("datasets.config.HF_DATASETS_OFFLINE" , lowercase )
def lowerCamelCase__ ( lowercase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp("data" ) / "file.html"
with pytest.raises(lowercase ):
fsspec_get("s3://huggingface.co" , temp_file=lowercase )
with pytest.raises(lowercase ):
fsspec_head("s3://huggingface.co" )
| 62 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
lowerCAmelCase : int = """bart"""
lowerCAmelCase : List[Any] = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_sas_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
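# Note on `index.search` above: FAISS returns (D, I), two (n_queries, k) arrays of
# inner-product scores and integer row ids into the indexed matrix. Tiny illustration
# with random vectors (illustrative only, not used by the app):
def _faiss_search_shapes_sketch():
    rng = np.random.RandomState(0)
    index = faiss.IndexFlatIP(128)
    index.add(rng.randn(1000, 128).astype("float32"))
    scores, ids = index.search(rng.randn(1, 128).astype("float32"), 10)
    return scores.shape, ids.shape  # both (1, 10)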
def a__ ( snake_case__ , snake_case__="wiki40b" , snake_case__="dense" , snake_case__=10 ) -> Dict:
if source == "none":
lowerCamelCase , lowerCamelCase = (""" <P> """.join(["""""" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
lowerCamelCase , lowerCamelCase = query_qa_dense_index(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
lowerCamelCase , lowerCamelCase = query_es_index(
snake_case__ , snake_case__ , index_name="""english_wiki40b_snippets_100w""" , n_results=snake_case__ , )
lowerCamelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
lowerCamelCase = """question: {} context: {}""".format(snake_case__ , snake_case__ )
return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_sas_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    # `support_list` is the module-level variable set in the main body below
    return (answer, support_list)
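# Note on the `st.cache` decorator above: `hash_funcs` maps types Streamlit cannot
# hash (CUDA tensors, tokenizers) to a constant, so the cache key effectively depends
# only on the remaining arguments — the question document string and the generation
# hyper-parameters.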
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
lowerCAmelCase : Tuple = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
lowerCAmelCase : Optional[int] = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
lowerCAmelCase : Union[str, Any] = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
lowerCAmelCase : Tuple = st.sidebar.checkbox("""Demo options""")
if demo_options:
lowerCAmelCase : Optional[int] = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
lowerCAmelCase : Tuple = action_list.index(action_st)
lowerCAmelCase : Union[str, Any] = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
lowerCAmelCase : Any = show_type == """Show full text of passages"""
else:
lowerCAmelCase : int = 3
lowerCAmelCase : Tuple = True
lowerCAmelCase : List[str] = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
lowerCAmelCase : List[str] = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
lowerCAmelCase : str = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
lowerCAmelCase : Optional[Any] = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
lowerCAmelCase : Optional[int] = """wiki40b"""
lowerCAmelCase : List[str] = """dense"""
lowerCAmelCase : Tuple = """beam"""
lowerCAmelCase : Optional[int] = 2
lowerCAmelCase : Dict = 64
lowerCAmelCase : Optional[int] = 256
lowerCAmelCase : Union[str, Any] = None
lowerCAmelCase : Optional[int] = None
lowerCAmelCase : Tuple = st.sidebar.checkbox("""Generation options""")
if generate_options:
lowerCAmelCase : Optional[Any] = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
lowerCAmelCase : str = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
lowerCAmelCase : int = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowerCAmelCase : Tuple = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowerCAmelCase : int = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCAmelCase : str = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
lowerCAmelCase : int = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
lowerCAmelCase : List[Any] = None
# start main text
questions_list = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
            question_doc, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10)
            _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10)
            support_list = []
            for res_d, res_s in zip(support_list_dense, support_list_sparse):
                if tuple(res_d) not in support_list:
                    support_list += [tuple(res_d)]
                if tuple(res_s) not in support_list:
                    support_list += [tuple(res_s)]
            support_list = support_list[:10]
            question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list])
        else:
            question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10)
    if action in [0, 3]:
        answer, support_list = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
lowerCAmelCase : Any = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
lowerCAmelCase : Dict = res[1].strip()
if sec_titles == "":
lowerCAmelCase : Optional[Any] = """[{}]({})""".format(res[0], wiki_url)
else:
lowerCAmelCase : Tuple = sec_titles.split(""" & """)
lowerCAmelCase : List[str] = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
    nn_train_list = find_nearest_training(question)
    train_exple = nn_train_list[0]
    st.markdown(
        "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
    )
    answers_st = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
lowerCAmelCase : Tuple = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 543 | 0 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'Visual-Attention-Network/van-base': (
'https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
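# Usage sketch (illustrative; `to_dict`/`from_dict` are inherited from PretrainedConfig):
#
#   config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#   assert VanConfig.from_dict(config.to_dict()).hidden_sizes == config.hidden_sizes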
| 702 |
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
logger = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch")
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device)
        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
| 206 | 0 |
"""simple docstring"""
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
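# Illustrative helper (a sketch, not part of this module's public API): map an
# InferenceSession's declared output types to numpy dtypes using the table above.
def _output_dtypes_sketch(session):
    return {output.name: ORT_TO_NP_TYPE[output.type] for output in session.get_outputs()}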
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
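# Usage sketch (repo id and paths are placeholders, not real checkpoints):
#
#   model = OnnxRuntimeModel.from_pretrained("some-org/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(sample=np.zeros((1, 4, 64, 64), dtype=np.float32))
#   model.save_pretrained("./local_copy")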
| 574 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
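# Sketch of how axes like these are consumed when exporting with plain torch.onnx
# (illustrative; the supported path goes through `transformers.onnx.export`):
#
#   torch.onnx.export(
#       model, (input_ids, attention_mask), "model.onnx",
#       input_names=["input_ids", "attention_mask"],
#       output_names=["last_hidden_state"],
#       dynamic_axes={"input_ids": {0: "batch", 1: "sequence"},
#                     "attention_mask": {0: "batch", 1: "sequence"}},
#   )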
| 574 | 1 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
_snake_case = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
_snake_case = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
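# Example (illustrative): get_letter_count("Hello") counts case-insensitively, so it
# maps "E" -> 1, "H" -> 1, "L" -> 2, "O" -> 1 and every other letter to 0.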
def get_item_at_index_zero(x: tuple) -> str:
    return x[0]
def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)
def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
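# Worked example (illustrative): ordinary English prose keeps most of ETAOIN's six most
# and six least frequent letters at the matching ends of its frequency order, so
# english_freq_match_score approaches the maximum of 12, while scrambled or uniformly
# random text usually scores much lower.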
if __name__ == "__main__":
import doctest
doctest.testmod()
| 659 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
| 659 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
"google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
"google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
"google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 400 |
def pancake_sort(arr) -> list:
    cur = len(arr)
    while cur > 1:
        # Find the maximum number in arr
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
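# Worked trace (illustrative) for pancake_sort([3, 1, 2]):
#   cur=3: max of [3, 1, 2] is at index 0 -> prefix flip leaves [3, 1, 2],
#          then flipping the first 3 elements gives [2, 1, 3]
#   cur=2: max of [2, 1] is at index 0 -> prefix flip leaves [2, 1, 3],
#          then flipping the first 2 elements gives [1, 2, 3]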
| 205 | 0 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : int , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Optional[Any]=10 , UpperCamelCase__ : int=3 , UpperCamelCase__ : int=2 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Union[str, Any]=32 , UpperCamelCase__ : Dict=5 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Any=37 , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : int="divided_space_time" , UpperCamelCase__ : Tuple=None , ):
A__ : str =parent
A__ : str =batch_size
A__ : Any =image_size
A__ : Union[str, Any] =num_channels
A__ : str =patch_size
A__ : Union[str, Any] =num_frames
A__ : Any =is_training
A__ : Optional[int] =use_labels
A__ : Optional[int] =hidden_size
A__ : Union[str, Any] =num_hidden_layers
A__ : List[str] =num_attention_heads
A__ : Tuple =intermediate_size
A__ : List[Any] =hidden_act
A__ : str =hidden_dropout_prob
A__ : Optional[Any] =attention_probs_dropout_prob
A__ : Dict =attention_type
A__ : str =initializer_range
A__ : str =scope
A__ : int =num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
A__ : Optional[Any] =(image_size // patch_size) ** 2
A__ : List[Any] =(num_frames) * self.num_patches_per_frame + 1
def _UpperCAmelCase ( self : str ):
A__ : Dict =floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
A__ : List[Any] =None
if self.use_labels:
A__ : List[str] =ids_tensor([self.batch_size] , self.num_labels )
A__ : List[Any] =self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : Tuple ):
A__ : Tuple =TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
A__ : Tuple =self.num_labels
return config
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any] ):
A__ : Union[str, Any] =TimesformerModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : Union[str, Any] =model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] ):
A__ : Union[str, Any] =TimesformerForVideoClassification(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
A__ : int =model(UpperCamelCase__ )
# verify the logits shape
A__ : Optional[int] =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[int] ):
A__ : int =self.prepare_config_and_inputs()
A__ , A__ , A__ : Tuple =config_and_inputs
A__ : int ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Any = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__magic_name__ : Optional[Any] = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__magic_name__ : int = False
__magic_name__ : Optional[Any] = False
__magic_name__ : int = False
__magic_name__ : Tuple = False
def _UpperCAmelCase ( self : List[str] ):
A__ : Optional[Any] =TimesformerModelTester(self )
A__ : int =ConfigTester(
self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _UpperCAmelCase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[str]=False ):
A__ : str =copy.deepcopy(UpperCamelCase__ )
if return_labels:
if model_class in get_values(UpperCamelCase__ ):
A__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase__ )
return inputs_dict
def _UpperCAmelCase ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _UpperCAmelCase ( self : Union[str, Any] ):
pass
def _UpperCAmelCase ( self : Tuple ):
A__ , A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Dict =model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A__ : Any =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ , A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[int] =model_class(UpperCamelCase__ )
A__ : Dict =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Any =[*signature.parameters.keys()]
A__ : Union[str, Any] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[Any] ):
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _UpperCAmelCase ( self : List[Any] ):
A__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*UpperCamelCase__ )
@slow
def _UpperCAmelCase ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Dict =TimesformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _UpperCAmelCase ( self : Dict ):
if not self.has_attentions:
pass
else:
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
A__ : Optional[Any] =True
for model_class in self.all_model_classes:
A__ : Tuple =self.model_tester.seq_length
A__ : Optional[int] =self.model_tester.num_frames
A__ : List[Any] =True
A__ : Optional[Any] =False
A__ : List[Any] =True
A__ : Optional[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Tuple =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Any =True
A__ : int =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : str =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
A__ : int =len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : List[Any] =True
A__ : Optional[Any] =True
A__ : Optional[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
A__ : Optional[int] =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _UpperCAmelCase ( self : Any ):
def check_hidden_states_output(UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ):
A__ : Any =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : int =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] =outputs.hidden_states
A__ : Optional[int] =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
A__ : List[Any] =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Any =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowercase ( ):
"""simple docstring"""
A__ : Any =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
A__ : Union[str, Any] =np.load(UpperCamelCase )
return list(UpperCamelCase )
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
@cached_property
def _UpperCAmelCase ( self : List[Any] ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _UpperCAmelCase ( self : List[Any] ):
A__ : Any =TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
UpperCamelCase__ )
A__ : Dict =self.default_image_processor
A__ : Tuple =prepare_video()
A__ : Dict =image_processor(video[:8] , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
A__ : Optional[int] =model(**UpperCamelCase__ )
# verify the logits
A__ : Optional[Any] =torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
A__ : Dict =torch.tensor([-0.3016, -0.7713, -0.4205] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 595 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def lowercase ( UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A__ : Dict =VideoMAEConfig()
set_architecture_configs(UpperCamelCase , UpperCamelCase )
if "finetuned" not in model_name:
A__ : Optional[int] =False
if "finetuned" in model_name:
A__ : Tuple ="huggingface/label-files"
if "kinetics" in model_name:
A__ : List[Any] =400
A__ : str ="kinetics400-id2label.json"
elif "ssv2" in model_name:
A__ : Optional[Any] =174
A__ : Optional[Any] ="something-something-v2-id2label.json"
else:
raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned." )
A__ : str =json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="dataset" ) , "r" ) )
A__ : List[Any] ={int(UpperCamelCase ): v for k, v in idalabel.items()}
A__ : Optional[Any] =idalabel
A__ : Union[str, Any] ={v: k for k, v in idalabel.items()}
return config
def lowercase ( UpperCamelCase : int , UpperCamelCase : List[Any] ):
"""simple docstring"""
if "small" in model_name:
A__ : Any =384
A__ : Tuple =1536
A__ : Dict =12
A__ : Optional[Any] =16
A__ : int =12
A__ : Tuple =3
A__ : Union[str, Any] =192
A__ : Optional[Any] =768
elif "large" in model_name:
A__ : Optional[int] =1024
A__ : Optional[int] =4096
A__ : int =24
A__ : Tuple =16
A__ : int =12
A__ : Any =8
A__ : Optional[int] =512
A__ : List[str] =2048
elif "huge" in model_name:
A__ : List[Any] =1280
A__ : int =5120
A__ : List[str] =32
A__ : Dict =16
A__ : int =12
A__ : Dict =8
A__ : Optional[int] =640
A__ : Tuple =2560
elif "base" not in model_name:
raise ValueError("Model name should include either \"small\", \"base\", \"large\", or \"huge\"" )
def lowercase ( UpperCamelCase : Any ):
"""simple docstring"""
if "encoder." in name:
A__ : List[Any] =name.replace("encoder." , "" )
if "cls_token" in name:
A__ : int =name.replace("cls_token" , "videomae.embeddings.cls_token" )
if "decoder_pos_embed" in name:
A__ : int =name.replace("decoder_pos_embed" , "decoder.decoder_pos_embed" )
if "pos_embed" in name and "decoder" not in name:
A__ : Any =name.replace("pos_embed" , "videomae.embeddings.position_embeddings" )
if "patch_embed.proj" in name:
A__ : Optional[Any] =name.replace("patch_embed.proj" , "videomae.embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
A__ : Optional[Any] =name.replace("patch_embed.norm" , "videomae.embeddings.norm" )
if "decoder.blocks" in name:
A__ : int =name.replace("decoder.blocks" , "decoder.decoder_layers" )
if "blocks" in name:
A__ : Union[str, Any] =name.replace("blocks" , "videomae.encoder.layer" )
if "attn.proj" in name:
A__ : Union[str, Any] =name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name and "bias" not in name:
A__ : Any =name.replace("attn" , "attention.self" )
if "attn" in name:
A__ : Optional[int] =name.replace("attn" , "attention.attention" )
if "norm1" in name:
A__ : Optional[Any] =name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
A__ : Any =name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
A__ : int =name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
A__ : List[Any] =name.replace("mlp.fc2" , "output.dense" )
if "decoder_embed" in name:
A__ : Tuple =name.replace("decoder_embed" , "decoder.decoder_embed" )
if "decoder_norm" in name:
A__ : str =name.replace("decoder_norm" , "decoder.decoder_norm" )
if "decoder_pred" in name:
A__ : Optional[int] =name.replace("decoder_pred" , "decoder.decoder_pred" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
A__ : str =name.replace("norm.weight" , "videomae.layernorm.weight" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
A__ : Any =name.replace("norm.bias" , "videomae.layernorm.bias" )
if "head" in name and "decoder" not in name:
A__ : Union[str, Any] =name.replace("head" , "classifier" )
return name
def lowercase ( UpperCamelCase : Any , UpperCamelCase : Tuple ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
A__ : Any =orig_state_dict.pop(UpperCamelCase )
if key.startswith("encoder." ):
A__ : Tuple =key.replace("encoder." , "" )
if "qkv" in key:
A__ : Optional[Any] =key.split("." )
if key.startswith("decoder.blocks" ):
A__ : Optional[Any] =config.decoder_hidden_size
A__ : Tuple =int(key_split[2] )
A__ : str ="decoder.decoder_layers."
if "weight" in key:
A__ : Optional[Any] =val[:dim, :]
A__ : int =val[dim : dim * 2, :]
A__ : str =val[-dim:, :]
else:
A__ : Union[str, Any] =config.hidden_size
A__ : Any =int(key_split[1] )
A__ : List[Any] ="videomae.encoder.layer."
if "weight" in key:
A__ : int =val[:dim, :]
A__ : Tuple =val[dim : dim * 2, :]
A__ : Any =val[-dim:, :]
else:
A__ : Dict =val
return orig_state_dict
def lowercase ( ):
"""simple docstring"""
A__ : int =hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
A__ : Optional[Any] =np.load(UpperCamelCase )
return list(UpperCamelCase )
def lowercase ( UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ):
"""simple docstring"""
A__ : Tuple =get_videomae_config(UpperCamelCase )
if "finetuned" in model_name:
A__ : Dict =VideoMAEForVideoClassification(UpperCamelCase )
else:
A__ : List[Any] =VideoMAEForPreTraining(UpperCamelCase )
# download original checkpoint, hosted on Google Drive
A__ : List[str] ="pytorch_model.bin"
gdown.cached_download(UpperCamelCase , UpperCamelCase , quiet=UpperCamelCase )
A__ : Dict =torch.load(UpperCamelCase , map_location="cpu" )
if "model" in files:
A__ : Dict =files["model"]
else:
A__ : Optional[int] =files["module"]
A__ : Union[str, Any] =convert_state_dict(UpperCamelCase , UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# verify model on basic input
A__ : Dict =VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
A__ : Dict =prepare_video()
A__ : List[Any] =image_processor(UpperCamelCase , return_tensors="pt" )
if "finetuned" not in model_name:
A__ : Any =hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt" )
A__ : Any =torch.load(UpperCamelCase )
A__ : List[Any] =model(**UpperCamelCase )
A__ : int =outputs.logits
A__ : str =[
"videomae-small-finetuned-kinetics",
"videomae-small-finetuned-ssv2",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"videomae-base-short",
"videomae-base-short-finetuned-kinetics",
"videomae-base",
"videomae-base-finetuned-kinetics",
"videomae-large",
"videomae-large-finetuned-kinetics",
"videomae-huge-finetuned-kinetics",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"videomae-base-short-ssv2",
"videomae-base-short-finetuned-ssv2",
"videomae-base-ssv2",
"videomae-base-finetuned-ssv2",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
A__ : Any =torch.Size([1, 400] )
A__ : Union[str, Any] =torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
A__ : Tuple =torch.Size([1, 174] )
A__ : Any =torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
A__ : List[Any] =torch.Size([1, 1408, 1536] )
A__ : Union[str, Any] =torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
A__ : str =torch.Size([1, 1408, 1536] )
A__ : Optional[Any] =torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
A__ : Any =torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
A__ : Tuple =torch.Size([1, 1408, 1536] )
A__ : Optional[int] =torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
A__ : str =torch.Size([1, 400] )
A__ : Any =torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
A__ : Union[str, Any] =torch.Size([1, 400] )
A__ : List[Any] =torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
A__ : str =torch.Size([1, 400] )
A__ : Optional[Any] =torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
A__ : Union[str, Any] =torch.Size([1, 400] )
A__ : Optional[Any] =torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
A__ : Optional[int] =torch.Size([1, 1408, 1536] )
A__ : Any =torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
A__ : Optional[int] =torch.Size([1, 174] )
A__ : Tuple =torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
A__ : Optional[int] =torch.Size([1, 1408, 1536] )
A__ : Any =torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
A__ : Union[str, Any] =torch.Size([1, 174] )
A__ : Any =torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(F'''Model name not supported. Should be one of {model_names}''' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , UpperCamelCase , atol=1E-4 )
else:
print("Logits:" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase , atol=1E-4 )
print("Logits ok!" )
# verify loss, if applicable
if model_name == "videomae-base-short":
A__ : Union[str, Any] =outputs.loss
assert torch.allclose(UpperCamelCase , UpperCamelCase , atol=1E-4 )
print("Loss ok!" )
if pytorch_dump_folder_path is not None:
print(F'''Saving model and image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(UpperCamelCase )
model.save_pretrained(UpperCamelCase )
if push_to_hub:
print("Pushing to the hub..." )
model.push_to_hub(UpperCamelCase , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4",
type=str,
help=(
"URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct"
" download link."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="/Users/nielsrogge/Documents/VideoMAE/Test",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.")
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 595 | 1 |
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
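# Usage sketch (hedged): exporting a SegFormer checkpoint with this ONNX config. The
# checkpoint name is a real hub id, but the transformers.onnx export call shown here is
# version-dependent, so treat the exact API as an assumption.
#
#   from pathlib import Path
#   from transformers import SegformerImageProcessor, SegformerModel
#   from transformers.onnx import export
#
#   model = SegformerModel.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   preprocessor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#   onnx_config = SegformerOnnxConfig(model.config)
#   export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, Path("segformer.onnx"))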
| 20 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # copy the timm weights over, relying on both state dicts having the same key ordering
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
args = parser.parse_args()
pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
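# Example invocation (a sketch; the script file name is assumed, the flags are the ones
# defined above):
#
#   python convert_levit_to_pytorch.py --model_name levit-128S --pytorch_dump_folder_path levit-dump-folder/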
| 20 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) | 714 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Return the difference between the square of the sum and the sum of the
    squares of the first n natural numbers, using the closed-form identities."""
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)
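

def solution_brute_force(n: int = 100) -> int:
    """A direct O(n) cross-check of the closed-form solution above (added as a sketch);
    it should agree with solution() for every n."""
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)
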
if __name__ == "__main__":
print(F'{solution() = }') | 692 | 0 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_DESCRIPTION = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_KWARGS_DESCRIPTION = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """Compute per-question macro-F1 and exact-match scores for the MultiRC task."""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]

    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"You should supply a configuration name selected in "
"[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="numpy" if not self.config_name == "record" and not self.config_name == "multirc" else None ,)
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"prediction_text": datasets.Value("string" ),
},
"references": {
"idx": {
"passage": datasets.Value("int64" ),
"query": datasets.Value("int64" ),
},
"answers": datasets.Sequence(datasets.Value("string" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("int64" ),
"paragraph": datasets.Value("int64" ),
"question": datasets.Value("int64" ),
},
"prediction": datasets.Value("int64" ),
},
"references": datasets.Value("int64" ),
}
else:
return {
"predictions": datasets.Value("int64" ),
"references": datasets.Value("int64" ),
}
    def _compute(self, predictions, references):
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                "[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]" )
| 185 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''RWKV/rwkv-4-169m-pile''': '''https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-430m-pile''': '''https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-1b5-pile''': '''https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-3b-pile''': '''https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-7b-pile''': '''https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json''',
'''RWKV/rwkv-4-14b-pile''': '''https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json''',
'''RWKV/rwkv-raven-1b5''': '''https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json''',
'''RWKV/rwkv-raven-3b''': '''https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json''',
'''RWKV/rwkv-raven-7b''': '''https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json''',
'''RWKV/rwkv-raven-14b''': '''https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json''',
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1e-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
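

# Usage sketch: a deliberately tiny configuration for experimentation (the values are
# illustrative, not those of a released checkpoint); it also shows the derived defaults.
#
#   config = RwkvConfig(vocab_size=1000, context_length=128, hidden_size=256, num_hidden_layers=4)
#   assert config.attention_hidden_size == 256      # falls back to hidden_size
#   assert config.intermediate_size == 4 * 256      # falls back to 4 * hidden_size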
| 185 | 1 |
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)

Prediction = Dict[str, Any]
Predictions = List[Prediction]
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ObjectDetectionPipeline(Pipeline):
    """Predict bounding boxes and classes for the objects in an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, 'vision')
        self.check_model_type(
            dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items()))

    def _sanitize_parameters(self, **kwargs):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
        return {}, {}, postprocess_kwargs

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        target_size = torch.IntTensor([[image.height, image.width]])
        inputs = self.image_processor(images=[image], return_tensors='pt')
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'], boxes=inputs['boxes'], return_tensors='pt')
        inputs['target_size'] = target_size
        return inputs

    def _forward(self, model_inputs):
        target_size = model_inputs.pop('target_size')
        outputs = self.model(**model_inputs)
        model_outputs = outputs.__class__({'target_size': target_size, **outputs})
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.9):
        target_size = model_outputs['target_size']
        if self.tokenizer is not None:
            # This is a LayoutLMForTokenClassification variant.
            # The OCR got the boxes and the model classified the words.
            height, width = target_size[0].tolist()

            def unnormalize(bbox):
                return self._get_bounding_box(
                    torch.Tensor(
                        [
                            (width * bbox[0] / 1000),
                            (height * bbox[1] / 1000),
                            (width * bbox[2] / 1000),
                            (height * bbox[3] / 1000),
                        ]))

            scores, classes = model_outputs['logits'].squeeze(0).softmax(dim=-1).max(dim=-1)
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox) for bbox in model_outputs['bbox'].squeeze(0)]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys, vals)) for vals in zip(scores.tolist(), labels, boxes) if vals[0] > threshold]
        else:
            # This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs, threshold, target_size)
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box) for box in boxes]

            # {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys, vals))
                for vals in zip(raw_annotation['scores'], raw_annotation['labels'], raw_annotation['boxes'])
            ]

        return annotation

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.')
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
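

# Usage sketch (hedged): this class is normally reached through the pipeline factory under
# the "object-detection" task; "facebook/detr-resnet-50" is a commonly used example checkpoint.
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": ..., "label": ..., "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]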
| 317 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system matrix @ x = vector via Gaussian elimination with
    partial pivoting, returning x as a column matrix."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the coefficient matrix and the right-hand side into one augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]
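

# Quick illustration of solve() on a 2x2 system (x + y = 3, x - y = 1  =>  x = 2, y = 1):
#
#   solve([[1, 1], [1, -1]], [[3], [1]])  # -> [[2.0], [1.0]]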
def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Return, as a callable, the minimal-degree polynomial passing through the
    points (1, y_list[0]), (2, y_list[1]), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
def question_function(variable: int) -> int:
    """The generating function u(n) = 1 - n + n^2 - ... + n^10 from Project Euler 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )
def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to
    progressively longer prefixes of the sequence."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]

    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(f'{solution() = }')
| 317 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 537 |
'''simple docstring'''
from math import pi
def arc_length(angle: int, radius: int) -> float:
    """Return the length of the arc subtended by ``angle`` degrees on a circle of
    the given radius."""
    return 2 * pi * radius * (angle / 360)
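

# Quick check: a 90 degree arc of a circle with radius 10 is a quarter circumference,
# i.e. arc_length(90, 10) == 2 * pi * 10 / 4 ≈ 15.71.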
if __name__ == "__main__":
print(arc_length(90, 10))
| 331 | 0 |
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer('''UNwant\u00E9d,running''')
            sentence_len = len(inputs['''input_ids''']) - 1
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len)

            inputs = tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''')
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len)
| 702 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 56 | 0 |
"""simple docstring"""
import random
def partition(a, left_index, right_index):
    """Partition a[left_index:right_index] around a[left_index] and return the
    pivot's final index."""
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1

    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    """Quicksort with a randomly chosen pivot."""
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]

    quick_sort_random(arr, 0, len(arr))

    print(arr)
if __name__ == "__main__":
main()
| 7 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_a = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_a = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_a = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer(s):
    '''Lower text and remove punctuation, articles and extra whitespace.'''

    def remove_articles(text):
        regex = re.compile(R'''\b(a|an|the)\b''', re.UNICODE)
        return re.sub(regex, ''' ''', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def compute_exact(a_gold, a_pred):
    '''Exact match after normalization.'''
    return int(normalize_answer(a_gold) == normalize_answer(a_pred))


def compute_em(predictions, references):
    '''Percentage of predictions that exactly match at least one reference.'''
    scores = [any(compute_exact(pred, ref) for ref in refs) for pred, refs in zip(predictions, references)]
    return (sum(scores) / len(scores)) * 100
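

# For example, compute_em(["hello there", "general kenobi"], [["hello there"], ["foo"]])
# scores 1 exact match out of 2 predictions, i.e. 50.0.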
def SARIngram(sgrams, cgrams, rgramslist, numref):
    '''Keep/delete/add sub-scores of SARI for one n-gram order.'''
    rgramsall = [rgram for rgrams in rgramslist for rgram in rgrams]
    rgramcounter = Counter(rgramsall)

    sgramcounter = Counter(sgrams)
    sgramcounter_rep = Counter()
    for sgram, scount in sgramcounter.items():
        sgramcounter_rep[sgram] = scount * numref

    cgramcounter = Counter(cgrams)
    cgramcounter_rep = Counter()
    for cgram, ccount in cgramcounter.items():
        cgramcounter_rep[cgram] = ccount * numref

    # KEEP
    keepgramcounter_rep = sgramcounter_rep & cgramcounter_rep
    keepgramcountergood_rep = keepgramcounter_rep & rgramcounter
    keepgramcounterall_rep = sgramcounter_rep & rgramcounter

    keeptmpscore1 = 0
    keeptmpscore2 = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscore1 += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscore2 += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    keepscore_precision = 1
    keepscore_recall = 1
    if len(keepgramcounter_rep) > 0:
        keepscore_precision = keeptmpscore1 / len(keepgramcounter_rep)
    if len(keepgramcounterall_rep) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        keepscore_recall = keeptmpscore2 / sum(keepgramcounterall_rep.values())
    keepscore = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        keepscore = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)

    # DELETION
    delgramcounter_rep = sgramcounter_rep - cgramcounter_rep
    delgramcountergood_rep = delgramcounter_rep - rgramcounter
    delgramcounterall_rep = sgramcounter_rep - rgramcounter
    deltmpscore1 = 0
    deltmpscore2 = 0
    for delgram in delgramcountergood_rep:
        deltmpscore1 += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscore2 += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    delscore_precision = 1
    if len(delgramcounter_rep) > 0:
        delscore_precision = deltmpscore1 / len(delgramcounter_rep)

    # ADDITION
    addgramcounter = set(cgramcounter) - set(sgramcounter)
    addgramcountergood = set(addgramcounter) & set(rgramcounter)
    addgramcounterall = set(rgramcounter) - set(sgramcounter)

    addtmpscore = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    addscore_precision = 1
    addscore_recall = 1
    if len(addgramcounter) > 0:
        addscore_precision = addtmpscore / len(addgramcounter)
    if len(addgramcounterall) > 0:
        addscore_recall = addtmpscore / len(addgramcounterall)
    addscore = 0
    if addscore_precision > 0 or addscore_recall > 0:
        addscore = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)

    return (keepscore, delscore_precision, addscore)
def SARIsent(ssent, csent, rsents):
    '''Sentence-level SARI, averaging 1- to 4-gram keep/delete/add scores.'''
    numref = len(rsents)

    s1grams = ssent.split(''' ''')
    c1grams = csent.split(''' ''')
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []

    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []
    for rsent in rsents:
        r1grams = rsent.split(''' ''')
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + ''' ''' + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + ''' ''' + r1grams[i + 1] + ''' ''' + r1grams[i + 2] + ''' ''' + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + ''' ''' + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + ''' ''' + s1grams[i + 1] + ''' ''' + s1grams[i + 2] + ''' ''' + s1grams[i + 3]
            s4grams.append(s4gram)

    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + ''' ''' + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + ''' ''' + c1grams[i + 1] + ''' ''' + c1grams[i + 2] + ''' ''' + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore
def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True):
    '''Lowercase and tokenize a sentence with the requested tokenizer.'''
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
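

# For example, normalize("Hello, World!") applies sacrebleu's "13a" tokenization after
# lowercasing, yielding roughly "hello , world !" (the exact output depends on the
# installed sacrebleu version).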
def compute_sari(sources, predictions, references):
    '''Corpus-level SARI over normalized sentences.'''
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError('''Sources length must match predictions and references lengths.''')
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(sources)
    return 100 * sari_score
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="exp" ,__snake_case=None ,__snake_case=False ,__snake_case=False ,__snake_case=False ,) -> int:
'''simple docstring'''
lowerCamelCase__ = len(references[0] )
if any(len(__snake_case ) != references_per_prediction for refs in references ):
raise ValueError('''Sacrebleu requires the same number of references for each prediction''' )
lowerCamelCase__ = [[refs[i] for refs in references] for i in range(__snake_case )]
lowerCamelCase__ = sacrebleu.corpus_bleu(
__snake_case ,__snake_case ,smooth_method=__snake_case ,smooth_value=__snake_case ,force=__snake_case ,lowercase=__snake_case ,use_effective_order=__snake_case ,)
return output.score
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WikiSplit(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Value('''string''', id='''sequence'''),
                    '''references''': datasets.Sequence(datasets.Value('''string''', id='''sequence'''), id='''references'''),
                }), codebase_urls=[
                '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''',
                '''https://github.com/cocoxu/simplification/blob/master/SARI.py''',
                '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''',
                '''https://github.com/mjpost/sacreBLEU''',
            ], reference_urls=[
                '''https://www.aclweb.org/anthology/Q16-1029.pdf''',
                '''https://github.com/mjpost/sacreBLEU''',
                '''https://en.wikipedia.org/wiki/BLEU''',
                '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''',
            ], )

    def _compute(self, sources, predictions, references):
        result = {}
        result.update({'''sari''': compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({'''sacrebleu''': compute_sacrebleu(predictions=predictions, references=references)})
        result.update({'''exact''': compute_em(predictions=predictions, references=references)})
        return result
| 481 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    """Convert an original MobileBERT TensorFlow checkpoint to a PyTorch model."""
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
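

# Example invocation (a sketch; the script file name and the paths are assumed):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/model.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin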
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 121 | """simple docstring"""
def pancake_sort(arr):
    """Sort ``arr`` by repeatedly flipping prefixes (pancake sort) and return it."""
    cur = len(arr)
    while cur > 1:
        # Find the index of the maximum number in arr[:cur]
        mi = arr.index(max(arr[0:cur]))
        # Reverse from 0 to mi
        arr = arr[mi::-1] + arr[mi + 1 : len(arr)]
        # Reverse whole list
        arr = arr[cur - 1 :: -1] + arr[cur : len(arr)]
        cur -= 1
    return arr
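

# For example, pancake_sort([3, 1, 2]) returns [1, 2, 3]: each pass flips the current
# maximum to the front and then flips it into its final position, like a stack of pancakes.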
if __name__ == "__main__":
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
print(pancake_sort(unsorted))
| 121 | 1 |
# Imports
import numpy as np
class IndexCalculation:
    """Compute vegetation indices (NDVI, EVI, SAVI variants, ...) from numpy
    matrices holding the red, green, blue, red-edge and NIR bands."""

    def __init__(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)

    def set_matricies(self, red=None, green=None, blue=None, red_edge=None, nir=None):
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True

    def calculation(self, index="", red=None, green=None, blue=None, red_edge=None, nir=None):
        self.set_matricies(red=red, green=green, blue=blue, red_edge=red_edge, nir=nir)
        funcs = {
            'ARVI2': self.arv12,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }

        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!')
            return False

    def arv12(self):
        return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))

    def ccci(self):
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )

    def cvi(self):
        return self.nir * (self.red / (self.green**2))

    def gli(self):
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )

    def ndvi(self):
        return (self.nir - self.red) / (self.nir + self.red)

    def bndvi(self):
        return (self.nir - self.blue) / (self.nir + self.blue)

    def red_edge_ndvi(self):
        return (self.redEdge - self.red) / (self.redEdge + self.red)

    def gndvi(self):
        return (self.nir - self.green) / (self.nir + self.green)

    def gbndvi(self):
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )

    def grndvi(self):
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )

    def rbndvi(self):
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))

    def pndvi(self):
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )

    def atsavi(self, a=0.08, b=1.22, x=0.03):
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )

    def bwdrvi(self):
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)

    def ci_green(self):
        return (self.nir / self.green) - 1

    def ci_rededge(self):
        return (self.nir / self.redEdge) - 1

    def ci(self):
        return (self.red - self.blue) / self.red

    def ctvi(self):
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5))) * (abs(ndvi + 0.5) ** (1 / 2))

    def gdvi(self):
        return self.nir - self.green

    def evi(self):
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )

    def gemi(self):
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)

    def gosavi(self, y=0.16):
        return (self.nir - self.green) / (self.nir + self.green + y)

    def gsavi(self, n=0.5):
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)

    def hue(self):
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue))

    def ivi(self, a=None, b=None):
        return (self.nir - b) / (a * self.red)

    def ipvi(self):
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)

    def i(self):
        return (self.red + self.green + self.blue) / 30.5

    def rvi(self):
        return self.nir / self.red

    def mrvi(self):
        return (self.rvi() - 1) / (self.rvi() + 1)

    def m_savi(self):
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2

    def norm_g(self):
        return self.green / (self.nir + self.red + self.green)

    def norm_nir(self):
        return self.nir / (self.nir + self.red + self.green)

    def norm_r(self):
        return self.red / (self.nir + self.red + self.green)

    def ngrdi(self):
        return (self.green - self.red) / (self.green + self.red)

    def ri(self):
        return (self.red - self.green) / (self.red + self.green)

    def s(self):
        max_value = np.max([np.max(self.red), np.max(self.green), np.max(self.blue)])
        min_value = np.min([np.min(self.red), np.min(self.green), np.min(self.blue)])
        return (max_value - min_value) / max_value

    def _if(self):
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)

    def dvi(self):
        return self.nir / self.red

    def tvi(self):
        return (self.ndvi() + 0.5) ** (1 / 2)

    def ndre(self):
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
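

# Usage sketch (the band values are illustrative):
#
#   red = np.array([[26.0], [30.0]])
#   nir = np.array([[128.0], [120.0]])
#   cl = IndexCalculation(red=red, nir=nir)
#   cl.calculation("NDVI", red=red, nir=nir)  # elementwise (nir - red) / (nir + red)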
| 562 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py')
BLIP_TEST_FILE = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py')
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_test_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = {'BertModelTest': 'BertModelTester'}
_a = {
'BlipModelTest': 'BlipModelTester',
'BlipTextImageModelTest': 'BlipTextImageModelsModelTester',
'BlipTextModelTest': 'BlipTextModelTester',
'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester',
'BlipVQAModelTest': 'BlipVQAModelTester',
'BlipVisionModelTest': 'BlipVisionModelTester',
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Union[str, Any] ):
_a = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_model_to_test_mapping(SCREAMING_SNAKE_CASE_ )
_a = {
'BertForMaskedLM': ['BertModelTest'],
'BertForMultipleChoice': ['BertModelTest'],
'BertForNextSentencePrediction': ['BertModelTest'],
'BertForPreTraining': ['BertModelTest'],
'BertForQuestionAnswering': ['BertModelTest'],
'BertForSequenceClassification': ['BertModelTest'],
'BertForTokenClassification': ['BertModelTest'],
'BertLMHeadModel': ['BertModelTest'],
'BertModel': ['BertModelTest'],
}
_a = {
'BlipForConditionalGeneration': ['BlipTextImageModelTest'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'],
'BlipForQuestionAnswering': ['BlipVQAModelTest'],
'BlipModel': ['BlipModelTest'],
'BlipTextModel': ['BlipTextModelTest'],
'BlipVisionModel': ['BlipVisionModelTest'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : List[Any] ):
_a = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = get_model_to_tester_mapping(SCREAMING_SNAKE_CASE_ )
_a = {
'BertForMaskedLM': ['BertModelTester'],
'BertForMultipleChoice': ['BertModelTester'],
'BertForNextSentencePrediction': ['BertModelTester'],
'BertForPreTraining': ['BertModelTester'],
'BertForQuestionAnswering': ['BertModelTester'],
'BertForSequenceClassification': ['BertModelTester'],
'BertForTokenClassification': ['BertModelTester'],
'BertLMHeadModel': ['BertModelTester'],
'BertModel': ['BertModelTester'],
}
_a = {
'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'],
'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'],
'BlipForQuestionAnswering': ['BlipVQAModelTester'],
'BlipModel': ['BlipModelTester'],
'BlipTextModel': ['BlipTextModelTester'],
'BlipVisionModel': ['BlipVisionModelTester'],
}
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
self.assertEqual(get_test_info.to_json(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
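
# Usage sketch for the utility under test (assumption: to_json renders the
# mapping with plain class names, as the expected dicts in these tests imply):
#
#   mapping = get_test_to_tester_mapping(os.path.join("tests", "models", "bert", "test_modeling_bert.py"))
#   print(get_test_info.to_json(mapping))  # {"BertModelTest": "BertModelTester"}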
| 562 | 1 |
"""simple docstring"""
def lowercase_ ( _lowercase : list[int] ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = []
if len(_lowercase ) == 1:
return [nums.copy()]
for _ in range(len(_lowercase ) ):
UpperCAmelCase : Union[str, Any] = nums.pop(0 )
UpperCAmelCase : List[str] = permute(_lowercase )
for perm in permutations:
perm.append(_lowercase )
result.extend(_lowercase )
nums.append(_lowercase )
return result
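
# Quick check (sketch): 3 elements yield 3! = 6 permutations.
#
#   >>> sorted(permute([1, 2, 3]))
#   [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]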

def permute2(nums: list[int]) -> list[list[int]]:
    """Return all permutations of nums via in-place swap backtracking."""

    def backtrack(start: int) -> None:
        if start == len(nums) - 1:
            output.append(nums[:])
        else:
            for i in range(start, len(nums)):
                nums[i], nums[start] = nums[start], nums[i]
                backtrack(start + 1)
                nums[i], nums[start] = nums[start], nums[i]  # undo the swap

    output: list[list[int]] = []
    backtrack(0)
    return output
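
# Both strategies agree up to ordering (sketch):
#
#   >>> sorted(permute([1, 2, 3])) == sorted(permute2([1, 2, 3]))
#   True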
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
snake_case_ : Union[str, Any] = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 292 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
snake_case_ : Union[str, Any] = logging.get_logger(__name__)
snake_case_ : Tuple = {"""vocab_file""": """sentencepiece.bpe.model"""}
snake_case_ : List[str] = {
"""vocab_file""": {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""",
}
}
snake_case_ : List[Any] = {
"""camembert-base""": 5_1_2,
}
snake_case_ : Any = """▁"""
class snake_case__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = ['''input_ids''', '''attention_mask''']
def __init__( self : int , lowercase : Union[str, Any] , lowercase : str="<s>" , lowercase : str="</s>" , lowercase : Optional[int]="</s>" , lowercase : Dict="<s>" , lowercase : Optional[Any]="<unk>" , lowercase : List[Any]="<pad>" , lowercase : Any="<mask>" , lowercase : Optional[int]=["<s>NOTUSED", "</s>NOTUSED"] , lowercase : Optional[Dict[str, Any]] = None , **lowercase : Dict , ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = AddedToken(lowercase , lstrip=lowercase , rstrip=lowercase ) if isinstance(lowercase , lowercase ) else mask_token
UpperCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=lowercase , eos_token=lowercase , unk_token=lowercase , sep_token=lowercase , cls_token=lowercase , pad_token=lowercase , mask_token=lowercase , additional_special_tokens=lowercase , sp_model_kwargs=self.sp_model_kwargs , **lowercase , )
UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase ) )
UpperCAmelCase : str = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
UpperCAmelCase : Union[str, Any] = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
UpperCAmelCase : Union[str, Any] = len(self.fairseq_tokens_to_ids )
UpperCAmelCase : Dict = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
UpperCAmelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __lowerCAmelCase ( self : Union[str, Any] , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : Dict = [self.cls_token_id]
UpperCAmelCase : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Dict , lowercase : List[int] , lowercase : Optional[List[int]] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase , token_ids_a=lowercase , already_has_special_tokens=lowercase )
if token_ids_a is None:
return [1] + ([0] * len(lowercase )) + [1]
return [1] + ([0] * len(lowercase )) + [1, 1] + ([0] * len(lowercase )) + [1]
def __lowerCAmelCase ( self : Tuple , lowercase : List[int] , lowercase : Optional[List[int]] = None ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase : List[Any] = {self.convert_ids_to_tokens(lowercase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __lowerCAmelCase ( self : Optional[Any] , lowercase : str ):
'''simple docstring'''
return self.sp_model.encode(lowercase , out_type=lowercase )
def __lowerCAmelCase ( self : int , lowercase : Optional[Any] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(lowercase ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(lowercase )
def __lowerCAmelCase ( self : Any , lowercase : Dict ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def convert_tokens_to_string( self , tokens ):
'''Converts a sequence of tokens (pieces) into a single string.'''
current_sub_tokens = []
out_string = ""
prev_is_special = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(current_sub_tokens ) + token
prev_is_special = True
current_sub_tokens = []
else:
current_sub_tokens.append(token )
prev_is_special = False
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def __getstate__( self : str ):
'''simple docstring'''
UpperCAmelCase : List[str] = self.__dict__.copy()
UpperCAmelCase : Dict = None
return state
def __setstate__( self : List[str] , lowercase : str ):
'''simple docstring'''
UpperCAmelCase : List[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase : Any = {}
UpperCAmelCase : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __lowerCAmelCase ( self : Optional[Any] , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(lowercase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCAmelCase : Optional[Any] = os.path.join(
lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase , "wb" ) as fi:
UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto()
fi.write(lowercase )
return (out_vocab_file,)
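# Usage sketch (assumption: the class above is CamembertTokenizer, whose
# sentencepiece model is hosted at "camembert-base" per the vocab map above):
#
#   from transformers import CamembertTokenizer
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   ids = tok("J'aime le camembert !")["input_ids"]
#   assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id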
| 292 | 1 |
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    # NOTE: identifiers reconstructed to match transformers' zero-shot object
    # detection pipeline; the original names were lost in extraction.
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(self, image: Union[str, "Image.Image", List[Dict[str, Any]]], candidate_labels: Union[str, List[str]] = None, **kwargs):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {"xmin": xmin, "ymin": ymin, "xmax": xmax, "ymax": ymax}
        return bbox
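
# Usage sketch for the chunk pipeline above (assumption: an OWL-ViT checkpoint
# such as "google/owlvit-base-patch32" backs the zero-shot-object-detection
# task; PyTorch only, as enforced in __init__):
#
#   from transformers import pipeline
#   detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#   preds = detector("street.jpg", candidate_labels=["car", "bicycle", "person"])
#   # -> [{"score": ..., "label": "car", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]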
| 100 |
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = seq_length
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_token_type_ids
SCREAMING_SNAKE_CASE__ = use_input_mask
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = use_mc_token_ids
SCREAMING_SNAKE_CASE__ = vocab_size
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = max_position_embeddings
SCREAMING_SNAKE_CASE__ = type_vocab_size
SCREAMING_SNAKE_CASE__ = type_sequence_label_size
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = num_choices
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = self.vocab_size - 1
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE__ = None
if self.use_mc_token_ids:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE__ = self.get_config()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowercase_ ( self ):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = CTRLModel(config=A_ )
model.to(A_ )
model.eval()
model(A_ , token_type_ids=A_ , head_mask=A_ )
model(A_ , token_type_ids=A_ )
SCREAMING_SNAKE_CASE__ = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = CTRLLMHeadModel(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase_ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''head_mask''': head_mask}
return config, inputs_dict
def lowercase_ ( self , A_ , A_ , A_ , A_ , *A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = CTRLForSequenceClassification(A_ )
model.to(A_ )
model.eval()
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ = model(A_ , token_type_ids=A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : Union[str, Any] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCamelCase__ : List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCamelCase__ : Tuple = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : Any = False
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = CTRLModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , n_embd=37 )
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*A_ )
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*A_ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self ):
'''simple docstring'''
pass
@slow
def lowercase_ ( self ):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ = CTRLModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase_ ( self ):
'''simple docstring'''
pass
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def lowercase_ ( self ):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = CTRLLMHeadModel.from_pretrained('''ctrl''' )
model.to(A_ )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=A_ ) # Legal the president is
SCREAMING_SNAKE_CASE__ = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
SCREAMING_SNAKE_CASE__ = model.generate(A_ , do_sample=A_ )
self.assertListEqual(output_ids[0].tolist() , A_ )
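# Generation sketch mirroring the integration test above ("Legal" is one of
# CTRL's control codes; "ctrl" is the checkpoint the test loads):
#
#   from transformers import CTRLLMHeadModel, CTRLTokenizer
#   tok = CTRLTokenizer.from_pretrained("ctrl")
#   model = CTRLLMHeadModel.from_pretrained("ctrl")
#   input_ids = tok("Legal the president is", return_tensors="pt").input_ids
#   output_ids = model.generate(input_ids, do_sample=False)
#   print(tok.decode(output_ids[0]))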
| 100 | 1 |
from typing import Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    # NOTE: class and attribute names reconstructed to match transformers'
    # Swin2SR image processor; the original identifiers were lost in extraction.
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_pad: bool = True, pad_size: int = 8, **kwargs) -> None:
        super().__init__(**kwargs)

        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images: ImageInput, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_pad: Optional[bool] = None, pad_size: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
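
# Padding-math sketch for pad_size=8: a height of 517 gets padded by 3 (to 520),
# but an exact multiple such as 520 gets padded by a full extra 8 (to 528),
# because the formula always rounds up to the *next* multiple:
#
#   >>> size = 8
#   >>> [(h, (h // size + 1) * size - h) for h in (517, 520)]
#   [(517, 3), (520, 8)]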
| 721 |
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
SCREAMING_SNAKE_CASE : Any = 250_004
SCREAMING_SNAKE_CASE : Optional[int] = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
lowercase : Union[str, Any] =MBartTokenizer
lowercase : Dict =MBartTokenizerFast
lowercase : Dict =True
lowercase : List[Any] =True
def UpperCamelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
lowercase_ :Optional[Any] = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase ( self ):
lowercase_ :Optional[Any] = MBartTokenizer(UpperCamelCase_ , keep_accents=UpperCamelCase_ )
lowercase_ :Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(UpperCamelCase_ , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowercase_ :List[str] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowercase_ :Dict = tokenizer.convert_tokens_to_ids(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowercase_ :Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase_ )
self.assertListEqual(
UpperCamelCase_ , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def UpperCamelCase ( self ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowercase_ :Optional[Any] = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowercase_ :Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase_ :int = self.tokenizer_class.from_pretrained(UpperCamelCase_ , **UpperCamelCase_ )
lowercase_ :Optional[int] = tempfile.mkdtemp()
lowercase_ :Union[str, Any] = tokenizer_r.save_pretrained(UpperCamelCase_ )
lowercase_ :int = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowercase_ :Union[str, Any] = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
lowercase_ :str = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowercase_ :Optional[Any] = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=True
lowercase_ :int = tempfile.mkdtemp()
lowercase_ :Optional[Any] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
lowercase_ :Any = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCamelCase_ , UpperCamelCase_ )
# Checks everything loads correctly in the same way
lowercase_ :Optional[int] = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowercase_ :Dict = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
# Save tokenizer rust, legacy_format=False
lowercase_ :Tuple = tempfile.mkdtemp()
lowercase_ :Optional[int] = tokenizer_r.save_pretrained(UpperCamelCase_ , legacy_format=UpperCamelCase_ )
lowercase_ :Tuple = tokenizer_p.save_pretrained(UpperCamelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowercase_ :int = tokenizer_r.from_pretrained(UpperCamelCase_ )
lowercase_ :Any = tokenizer_p.from_pretrained(UpperCamelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCamelCase_ , UpperCamelCase_ ) )
shutil.rmtree(UpperCamelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowercase : List[Any] ="""facebook/mbart-large-en-ro"""
lowercase : Optional[Any] =[
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowercase : Dict =[
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowercase : Tuple =[8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
def UpperCamelCase ( cls ):
lowercase_ :MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='''en_XX''' , tgt_lang='''ro_RO''' )
lowercase_ :Optional[int] = 1
return cls
def UpperCamelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 25_0001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 25_0004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 25_0020 )
def UpperCamelCase ( self ):
lowercase_ :Dict = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertIn(UpperCamelCase_ , self.tokenizer.all_special_ids )
lowercase_ :List[Any] = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2]
lowercase_ :str = self.tokenizer.decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
lowercase_ :List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :Dict = ['''this is gunna be a long sentence ''' * 20]
assert isinstance(src_text[0] , UpperCamelCase_ )
lowercase_ :Dict = 10
lowercase_ :Any = self.tokenizer(UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , UpperCamelCase_ )
self.assertEqual(len(UpperCamelCase_ ) , UpperCamelCase_ )
def UpperCamelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [25_0026, 25_0001] )
def UpperCamelCase ( self ):
lowercase_ :List[Any] = tempfile.mkdtemp()
lowercase_ :List[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCamelCase_ )
lowercase_ :Tuple = MBartTokenizer.from_pretrained(UpperCamelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase_ )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , return_tensors='''pt''' )
lowercase_ :Union[str, Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def UpperCamelCase ( self ):
lowercase_ :Any = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowercase_ :Tuple = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowercase_ :List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def UpperCamelCase ( self ):
lowercase_ :Any = self.tokenizer(self.src_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=3 , return_tensors='''pt''' )
lowercase_ :int = self.tokenizer(
text_target=self.tgt_text , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=10 , return_tensors='''pt''' )
lowercase_ :Optional[int] = targets['''input_ids''']
lowercase_ :List[Any] = shift_tokens_right(UpperCamelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCamelCase ( self ):
lowercase_ :int = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(UpperCamelCase_ ) , {
# A, test, EOS, en_XX
'''input_ids''': [[62, 3034, 2, 25_0004]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 25_0001,
} , )
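# Translation sketch matching the checkpoint exercised above
# (facebook/mbart-large-en-ro, en_XX -> ro_RO):
#
#   from transformers import MBartForConditionalGeneration, MBartTokenizer
#   tok = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
#   model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
#   batch = tok(["UN Chief Says There Is No Military Solution in Syria"], return_tensors="pt")
#   out = model.generate(**batch, decoder_start_token_id=tok.lang_code_to_id["ro_RO"])
#   print(tok.batch_decode(out, skip_special_tokens=True))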
| 441 | 0 |
"""simple docstring"""
import unittest
import torch
from torch import nn
from diffusers.models.activations import get_activation
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
__UpperCAmelCase : int = get_activation("""swish""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : int = get_activation("""silu""" )
self.assertIsInstance(UpperCamelCase , nn.SiLU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = get_activation("""mish""" )
self.assertIsInstance(UpperCamelCase , nn.Mish )
self.assertEqual(act(torch.tensor(-200 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
def lowerCamelCase__ ( self : str ):
'''simple docstring'''
__UpperCAmelCase : str = get_activation("""gelu""" )
self.assertIsInstance(UpperCamelCase , nn.GELU )
self.assertEqual(act(torch.tensor(-100 , dtype=torch.floataa ) ).item() , 0 )
self.assertNotEqual(act(torch.tensor(-1 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(0 , dtype=torch.floataa ) ).item() , 0 )
self.assertEqual(act(torch.tensor(20 , dtype=torch.floataa ) ).item() , 20 )
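
# Usage sketch of the helper under test: get_activation maps a string name to
# a torch.nn activation module.
#
#   act = get_activation("gelu")
#   y = act(torch.randn(4))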
| 139 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( _UpperCamelCase : Callable , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> np.array:
'''simple docstring'''
__UpperCAmelCase : Tuple = int(np.ceil((x_end - xa) / step_size ) )
__UpperCAmelCase : Optional[int] = np.zeros((n + 1,) )
__UpperCAmelCase : List[Any] = ya
__UpperCAmelCase : List[Any] = xa
for k in range(_UpperCamelCase ):
__UpperCAmelCase : str = y[k] + step_size * ode_func(_UpperCamelCase , y[k] )
__UpperCAmelCase : Tuple = y[k] + (
(step_size / 2) * (ode_func(_UpperCamelCase , y[k] ) + ode_func(x + step_size , _UpperCamelCase ))
)
x += step_size
return y
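
# Sanity sketch: for y' = y with y(0) = 1 the exact solution is e**x, so with
# h = 0.1 the estimate at x = 1 should land close to e:
#
#   >>> y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
#   >>> round(float(y[-1]), 3)   # exact value: e ~ 2.718
#   2.714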
if __name__ == "__main__":
import doctest
doctest.testmod()
| 139 | 1 |
from __future__ import annotations

from PIL import Image

# Define glider example
GLIDER = [
    [0, 1, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0, 0],
    [1, 1, 1, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0],
]

# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]


def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (
                not alive and neighbour_count == 3
            ):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation

def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
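
# Sanity sketch: the blinker oscillates with period 2.
#
#   >>> new_generation(BLINKER)
#   [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
#   >>> new_generation(new_generation(BLINKER)) == BLINKER
#   True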
if __name__ == "__main__":
_UpperCamelCase : int =generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 702 |
'''simple docstring'''
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_UpperCamelCase : List[str] ="platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class _SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = PegasusConfig
SCREAMING_SNAKE_CASE_ = {}
SCREAMING_SNAKE_CASE_ = 'gelu'
def __init__( self , _snake_case , _snake_case=13 , _snake_case=7 , _snake_case=True , _snake_case=False , _snake_case=99 , _snake_case=32 , _snake_case=5 , _snake_case=4 , _snake_case=37 , _snake_case=0.1 , _snake_case=0.1 , _snake_case=20 , _snake_case=2 , _snake_case=1 , _snake_case=0 , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = eos_token_id
__lowerCamelCase = pad_token_id
__lowerCamelCase = bos_token_id
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__lowerCamelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__lowerCamelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__lowerCamelCase = prepare_pegasus_inputs_dict(_snake_case , _snake_case , _snake_case )
return config, inputs_dict
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(_snake_case )
__lowerCamelCase = model.encode(inputs_dict['''input_ids'''] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
__lowerCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
__lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_snake_case , )
__lowerCamelCase = model.decode(_snake_case , _snake_case )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
__lowerCamelCase = 20
__lowerCamelCase = model_class_name(_snake_case )
__lowerCamelCase = model.encode(inputs_dict['''input_ids'''] )
decoder_input_ids , decoder_attention_mask = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
__lowerCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__lowerCamelCase = model.init_cache(decoder_input_ids.shape[0] , _snake_case , _snake_case )
__lowerCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__lowerCamelCase = model.decode(
decoder_input_ids[:, :-1] , _snake_case , decoder_attention_mask=_snake_case , past_key_values=_snake_case , decoder_position_ids=_snake_case , )
__lowerCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
__lowerCamelCase = model.decode(
decoder_input_ids[:, -1:] , _snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_snake_case , decoder_position_ids=_snake_case , )
__lowerCamelCase = model.decode(_snake_case , _snake_case , decoder_attention_mask=_snake_case )
__lowerCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' )
def lowerCamelCase_ ( A_ , A_ , A_ , A_=None , A_=None , ):
if attention_mask is None:
__lowerCamelCase = np.not_equal(A_ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__lowerCamelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = FlaxPegasusModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_snake_case , _snake_case , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_snake_case , _snake_case , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = self._prepare_for_class(_snake_case , _snake_case )
__lowerCamelCase = model_class(_snake_case )
@jax.jit
def encode_jitted(_snake_case , _snake_case=None , **_snake_case ):
return model.encode(input_ids=_snake_case , attention_mask=_snake_case )
with self.subTest('''JIT Enabled''' ):
__lowerCamelCase = encode_jitted(**_snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowerCamelCase = encode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase , __lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase = model_class(_snake_case )
__lowerCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
__lowerCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_snake_case , _snake_case , _snake_case ):
return model.decode(
decoder_input_ids=_snake_case , decoder_attention_mask=_snake_case , encoder_outputs=_snake_case , )
with self.subTest('''JIT Enabled''' ):
__lowerCamelCase = decode_jitted(**_snake_case ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
__lowerCamelCase = decode_jitted(**_snake_case ).to_tuple()
self.assertEqual(len(_snake_case ) , len(_snake_case ) )
for jitted_output, output in zip(_snake_case , _snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
__lowerCamelCase = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_snake_case )
__lowerCamelCase = np.ones((1, 1) )
__lowerCamelCase = model(_snake_case )
self.assertIsNotNone(_snake_case )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
__lowerCamelCase = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
__lowerCamelCase = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
__lowerCamelCase = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
__lowerCamelCase = tokenizer(_snake_case , return_tensors='''np''' , truncation=_snake_case , max_length=5_12 , padding=_snake_case )
__lowerCamelCase = model.generate(**_snake_case , num_beams=2 ).sequences
__lowerCamelCase = tokenizer.batch_decode(_snake_case , skip_special_tokens=_snake_case )
assert tgt_text == decoded
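# Summarization sketch matching the integration test above (google/pegasus-xsum,
# Flax weights):
#
#   from transformers import FlaxPegasusForConditionalGeneration, PegasusTokenizer
#   tok = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#   model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#   inputs = tok(["Some long article text ..."], return_tensors="np", truncation=True, max_length=512)
#   summary_ids = model.generate(**inputs, num_beams=2).sequences
#   print(tok.batch_decode(summary_ids, skip_special_tokens=True))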
| 575 | 0 |
"""Project Euler 89: characters saved by rewriting roman numerals minimally."""
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    """Convert a roman numeral string to its integer value."""
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value

def generate_roman_numerals(num: int) -> str:
    """Generate the shortest roman numeral for num."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
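
# Round-trip sketch: the minimal form of "XXXXVIIII" (49) is "XLIX", a saving
# of five characters.
#
#   >>> parse_roman_numerals("XXXXVIIII")
#   49
#   >>> generate_roman_numerals(49)
#   'XLIX'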

def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Sum of characters saved across every numeral in the input file."""
    savings = 0
    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 50 |
"""CodeGen model configuration."""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional

from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging


logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/codegen-350M-nl": "https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json",
    "Salesforce/codegen-350M-multi": "https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json",
    "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json",
    "Salesforce/codegen-2B-nl": "https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json",
    "Salesforce/codegen-2B-multi": "https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json",
    "Salesforce/codegen-2B-mono": "https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json",
    "Salesforce/codegen-6B-nl": "https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json",
    "Salesforce/codegen-6B-multi": "https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json",
    "Salesforce/codegen-6B-mono": "https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json",
    "Salesforce/codegen-16B-nl": "https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json",
    "Salesforce/codegen-16B-multi": "https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json",
    "Salesforce/codegen-16B-mono": "https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json",
}


class CodeGenConfig(PretrainedConfig):
    model_type = "codegen"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )


class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
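# Illustrative usage sketch (assumes this module is imported from within a
# transformers checkout, since it relies on relative imports):
#
#     config = CodeGenConfig(n_layer=2, n_head=4, n_embd=128)
#     onnx_config = CodeGenOnnxConfig(config)
#     print(onnx_config.inputs)  # OrderedDict mapping input names to dynamic axes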
| 50 | 1 |
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional

import pyarrow as pa
import pyarrow.json as paj

import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline


logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON."""

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None


class Json(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        if self.config.block_size is not None:
            logger.warning("The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead")
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                "The JSON loader parameter `use_threads` is deprecated and doesn't have any effect anymore."
            )
        if self.config.newlines_in_values is not None:
            raise ValueError("The JSON loader parameter `newlines_in_values` is no longer supported")
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)

                # We keep only the field we are interested in
                dataset = dataset[self.config.field]

                # We accept two formats: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)

            # If the file has one json object per line
            else:
                with open(file, "rb") as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else "strict"
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break

                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)

                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode("utf-8")

                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size)
                                    )
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}."
                                        )
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors
                                ) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contains the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. "
                                ) from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
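# Behaviour note with a hypothetical snippet: both accepted JSON layouts normalise
# to the same Arrow table before casting.
#
#     import pyarrow as pa
#     pa.Table.from_pydict({"a": [1, 2]})  # dict-of-lists is passed through directly
#     # a list-of-dicts such as [{"a": 1}, {"a": 2}] is first pivoted into that form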
| 719 |
from __future__ import annotations

from collections.abc import Callable


def trapezoidal_area(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximate the area under the curve between x_start and x_end with the trapezoidal rule."""
    xa = x_start
    fxa = fnc(x_start)
    area = 0.0

    for _ in range(steps):
        # Approximates small segments of curve as linear and solve
        # for trapezoidal area
        xa1 = (x_end - x_start) / steps + xa
        fxa1 = fnc(xa1)
        area += abs(fxa1 + fxa) * (xa1 - xa) / 2

        # Increment step
        xa = xa1
        fxa = fxa1

    return area


if __name__ == "__main__":

    def f(x: float) -> float:
        return x**3 + x**2

    print("f(x) = x^3 + x^2")
    print("The area between the curve, x = -5, x = 5 and the x axis is:")
    i = 10
    while i <= 100_000:
        print(f"with {i} steps: {trapezoidal_area(f, -5, 5, i)}")
        i *= 10
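# Quick analytic check (hand-verified): the integral of x^2 from 0 to 1 is exactly
# 1/3, and with 1000 steps the trapezoidal estimate lands near that value:
#
#     trapezoidal_area(lambda x: x * x, 0, 1, 1000)  # ~0.33333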
| 409 | 0 |
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket

import torch
import torch.distributed as dist


def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
| 1 |
from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: Any,
) -> Any:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance


graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}

if __name__ == "__main__":
    import doctest

    doctest.testmod()
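    # Worked example (hand-checked against the graphs above): the cheapest
    # E -> F route is E -> G -> F with total weight 3.
    assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3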
| 402 | 0 |
B64_CHARSET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"


def base64_encode(data: bytes) -> bytes:
    """Encodes ``data`` according to RFC 4648."""
    if not isinstance(data, bytes):
        msg = f"a bytes-like object is required, not '{data.__class__.__name__}'"
        raise TypeError(msg)

    binary_stream = "".join(bin(byte)[2:].zfill(8) for byte in data)

    padding_needed = len(binary_stream) % 6 != 0

    if padding_needed:
        # The padding that will be added later
        padding = b"=" * ((6 - len(binary_stream) % 6) // 2)

        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream) % 6)
    else:
        padding = b""

    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6], 2)]
            for index in range(0, len(binary_stream), 6)
        ).encode()
        + padding
    )


def base64_decode(encoded_data: str) -> bytes:
    """Decodes ``encoded_data`` according to RFC 4648."""
    if not isinstance(encoded_data, bytes) and not isinstance(encoded_data, str):
        msg = (
            "argument should be a bytes-like object or ASCII string, "
            f"not '{encoded_data.__class__.__name__}'"
        )
        raise TypeError(msg)

    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data, bytes):
        try:
            encoded_data = encoded_data.decode("utf-8")
        except UnicodeDecodeError:
            raise ValueError("base64 encoded data should only contain ASCII characters")

    padding = encoded_data.count("=")

    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding]
        ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data
        ), "Invalid base64 character(s) found."

    # Check the padding
    assert len(encoded_data) % 4 == 0 and padding < 3, "Incorrect padding"

    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]

        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )[: -padding * 2]
    else:
        binary_stream = "".join(
            bin(B64_CHARSET.index(char))[2:].zfill(6) for char in encoded_data
        )

    decoded_data = [
        int(binary_stream[index : index + 8], 2)
        for index in range(0, len(binary_stream), 8)
    ]

    return bytes(decoded_data)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
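    # Round-trip spot check (standard base64 test vector):
    assert base64_encode(b"Hello World!") == b"SGVsbG8gV29ybGQh"
    assert base64_decode("SGVsbG8gV29ybGQh") == b"Hello World!"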
| 709 |
"""Report GitHub Actions self-hosted runners that have gone offline."""
import argparse
import json
import subprocess


def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":

    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )

    parser.add_argument(
        "--token", default=None, type=str, required=True, help="A token that has actions:read permission."
    )
    args = parser.parse_args()

    get_runner_status(args.target_runners, args.token)
| 145 | 0 |
"""Project Euler problem 50: the prime below one million that is the sum of the
most consecutive primes."""
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return all primes below ``limit`` using an odd-only sieve."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):  # only need to check odd candidates
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """Return the largest prime below ``ceiling`` expressible as the longest run of
    consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
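    # Spot check of the sieve helper (hand-verified): primes below 10.
    assert prime_sieve(10) == [2, 3, 5, 7]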
| 596 |
from typing import Optional

import pyspark

from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader


class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
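# Illustrative usage sketch (assumes a live SparkSession; the variable names
# below are hypothetical):
#
#     from pyspark.sql import SparkSession
#     spark = SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.range(10)
#     ds = SparkDatasetReader(df, streaming=False).read()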
| 596 | 1 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 303 |
import gc
import unittest

import numpy as np
import torch

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps

from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet1DModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16_000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs

    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)


@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
| 303 | 1 |
"""Build a RAG knowledge dataset (passages + DPR embeddings + FAISS index) from a csv file."""
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional

import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset

from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser


logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"


def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``."""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents: dict) -> dict:
    """Split documents into passages."""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages."""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index


@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )


if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
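# Example invocation sketch (the script filename and csv path below are placeholders):
#
#   python use_own_knowledge_dataset.py \
#       --csv_path path/to/my_knowledge_dataset.csv \
#       --output_dir path/to/my_knowledge_dataset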
| 13 |
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch


TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 344 | 0 |
from typing import TYPE_CHECKING
from ..utils import _LazyModule
_import_structure = {
"config": [
"EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
"OnnxConfig",
"OnnxConfigWithPast",
"OnnxSeq2SeqConfigWithPast",
"PatchingSpec",
],
"convert": ["export", "validate_model_outputs"],
"features": ["FeaturesManager"],
"utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}
if TYPE_CHECKING:
from .config import (
EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
OnnxConfig,
OnnxConfigWithPast,
OnnxSeqaSeqConfigWithPast,
PatchingSpec,
)
from .convert import export, validate_model_outputs
from .features import FeaturesManager
from .utils import ParameterFormat, compute_serialized_parameters_size
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 710 |
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Gets raw characters from inputs"""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Gets a character from the keyboard and returns the key code"""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 175 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Finds root from the ``starting_point`` onwards by the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
| 31 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
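# Illustrative usage sketch (downloads the published tokenizer files; output is
# indicative only):
#
#     tok = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
#     tok("Hello world")["input_ids"]  # a short list of token ids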
| 417 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space so that SPIECE_UNDERLINE is only used
        # at the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
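# Illustrative usage sketch (requires the sentencepiece model shipped with the
# t5-small checkpoint):
#
#     tok = T5Tokenizer.from_pretrained("t5-small")
#     tok("Hello world").input_ids  # ends with the </s> id, i.e. 1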
| 705 |
import argparse

import torch
from torch import nn

from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 169 | 0 |
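The conversion script above follows the usual fairseq-to-transformers recipe: load the raw checkpoint, drop bookkeeping keys, rename the rest, then load with `strict=False` and whitelist keys that may legitimately be missing. A generic, hedged sketch of that pattern; the key names below are placeholders rather than the real Speech2Text keys:

import torch

def convert_checkpoint(src_path: str, key_map: dict, ignore_keys: list) -> dict:
    state_dict = torch.load(src_path, map_location="cpu")["model"]
    # 1. drop version counters and other bookkeeping entries
    for k in ignore_keys:
        state_dict.pop(k, None)
    # 2. rename keys by substring substitution
    for old, new in key_map.items():
        for key in list(state_dict.keys()):
            if old in key:
                state_dict[key.replace(old, new)] = state_dict.pop(key)
    return state_dict

# e.g. converted = convert_checkpoint("model.pt", {"transformer_layers": "layers"}, ["encoder.version"])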
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase : List[Any] = logging.get_logger(__name__)
__lowerCAmelCase : List[str] = {
'''MIT/ast-finetuned-audioset-10-10-0.4593''': (
'''https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'''
),
}
class _lowerCAmelCase ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''audio-spectrogram-transformer'''
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1E-12 , patch_size=1_6 , qkv_bias=True , frequency_stride=1_0 , time_stride=1_0 , max_length=1_0_2_4 , num_mel_bins=1_2_8 , **kwargs , ) -> List[str]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
| 58 |
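The config class above only stores hyperparameters. A short usage sketch, assuming the class is exposed as `ASTConfig` as in the transformers library (the override values are arbitrary):

from transformers import ASTConfig

config = ASTConfig(hidden_size=384, num_hidden_layers=6, num_mel_bins=128)
print(config.model_type)               # "audio-spectrogram-transformer"
print(config.hidden_size)              # 384, overriding the 768 default
print(config.to_dict()["patch_size"])  # 16, the default kept from __init__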
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput( BaseOutput ):
    """simple docstring"""
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding( nn.Module ):
    """simple docstring"""
    conditioning_embedding_channels: int
    block_out_channels: tuple = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup( self ) -> None:
        """simple docstring"""
        self.conv_in = nn.Conv(
            self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        blocks = []
        for i in range(len(self.block_out_channels ) - 1 ):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conva = nn.Conv(
                channel_in , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(conva )
            convb = nn.Conv(
                channel_out , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
            blocks.append(convb )
        self.blocks = blocks
        self.conv_out = nn.Conv(
            self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , __SCREAMING_SNAKE_CASE ) -> Tuple:
        """simple docstring"""
        embedding = self.conv_in(__SCREAMING_SNAKE_CASE )
        embedding = nn.silu(embedding )
        for block in self.blocks:
            embedding = block(embedding )
            embedding = nn.silu(embedding )
        embedding = self.conv_out(embedding )
        return embedding
@flax_register_to_config
class FlaxControlNetModel( nn.Module , FlaxModelMixin , ConfigMixin ):
    """simple docstring"""
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: tuple = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: bool = False
    block_out_channels: tuple = (320, 640, 1_280, 1_280)
    layers_per_block: int = 2
    attention_head_dim: int = 8
    num_attention_heads: int = None
    cross_attention_dim: int = 1_280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: tuple = (16, 32, 96, 256)
    def init_weights( self , rng ) -> FrozenDict:
        """simple docstring"""
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape , dtype=jnp.float32 )
        timesteps = jnp.ones((1,) , dtype=jnp.int32 )
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.float32 )
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape , dtype=jnp.float32 )
        params_rng, dropout_rng = jax.random.split(rng )
        rngs = {'''params''': params_rng, '''dropout''': dropout_rng}
        return self.init(rngs , sample , timesteps , encoder_hidden_states , controlnet_cond )["params"]
    def setup( self ) -> None:
        """simple docstring"""
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4
        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim
        # input
        self.conv_in = nn.Conv(
            block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
        # time
        self.time_proj = FlaxTimesteps(
            block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim , dtype=self.dtype )
        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(
            conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention , bool ):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types )
        if isinstance(num_attention_heads , int ):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types )
        # down
        down_blocks = []
        controlnet_down_blocks = []
        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(
            output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
        controlnet_down_blocks.append(controlnet_block )
        for i, down_block_type in enumerate(self.down_block_types ):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels ) - 1
            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
            else:
                down_block = FlaxDownBlockaD(
                    in_channels=input_channel , out_channels=output_channel , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
            down_blocks.append(down_block )
            for _ in range(self.layers_per_block ):
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
            if not is_final_block:
                controlnet_block = nn.Conv(
                    output_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
                controlnet_down_blocks.append(controlnet_block )
        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks
        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlockaDCrossAttn(
            in_channels=mid_block_channel , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
        self.controlnet_mid_block = nn.Conv(
            mid_block_channel , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
    def __call__( self , sample , timesteps , encoder_hidden_states , controlnet_cond , conditioning_scale = 1.0 , return_dict = True , train = False , ) -> Union[FlaxControlNetOutput, Tuple]:
        """simple docstring"""
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond , axis=1 )
        # 1. time
        if not isinstance(timesteps , jnp.ndarray ):
            timesteps = jnp.array([timesteps] , dtype=jnp.int32 )
        elif isinstance(timesteps , jnp.ndarray ) and len(timesteps.shape ) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32 )
            timesteps = jnp.expand_dims(timesteps , 0 )
        t_emb = self.time_proj(timesteps )
        t_emb = self.time_embedding(t_emb )
        # 2. pre-process
        sample = jnp.transpose(sample , (0, 2, 3, 1) )
        sample = self.conv_in(sample )
        controlnet_cond = jnp.transpose(controlnet_cond , (0, 2, 3, 1) )
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond )
        sample += controlnet_cond
        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block , FlaxCrossAttnDownBlockaD ):
                sample, res_samples = down_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
            else:
                sample, res_samples = down_block(sample , t_emb , deterministic=not train )
            down_block_res_samples += res_samples
        # 4. mid
        sample = self.mid_block(sample , t_emb , encoder_hidden_states , deterministic=not train )
        # 5. controlnet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples , self.controlnet_down_blocks ):
            down_block_res_sample = controlnet_block(down_block_res_sample )
            controlnet_down_block_res_samples += (down_block_res_sample,)
        down_block_res_samples = controlnet_down_block_res_samples
        mid_block_res_sample = self.controlnet_mid_block(sample )
        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale
        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)
        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples , mid_block_res_sample=mid_block_res_sample )
| 381 | 0 |
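The `__call__` above returns one residual per down-block resolution plus a mid-block residual, all multiplied by `conditioning_scale`. A small sketch of that final scaling step in isolation; shapes are illustrative only:

import jax.numpy as jnp

def scale_controlnet_residuals(down_block_res_samples, mid_block_res_sample, conditioning_scale=1.0):
    # every skip-connection residual and the mid residual are scaled uniformly,
    # which is how ControlNet strength is dialed up or down at inference time
    down = [sample * conditioning_scale for sample in down_block_res_samples]
    mid = mid_block_res_sample * conditioning_scale
    return down, mid

residuals = [jnp.ones((1, 64, 64, 320)), jnp.ones((1, 32, 32, 640))]
mid = jnp.ones((1, 8, 8, 1280))
down, mid = scale_controlnet_residuals(residuals, mid, conditioning_scale=0.5)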
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
_A : List[str] = logging.get_logger(__name__)
_A : Any = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
_A : Dict = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
_A : Any = {
"""junnyu/roformer_chinese_small""": 15_36,
"""junnyu/roformer_chinese_base""": 15_36,
"""junnyu/roformer_chinese_char_small""": 5_12,
"""junnyu/roformer_chinese_char_base""": 5_12,
"""junnyu/roformer_small_discriminator""": 1_28,
"""junnyu/roformer_small_generator""": 1_28,
}
_A : str = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class a__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ):
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab ) )
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def save_pretrained( self , save_directory , legacy_format=None , filename_prefix=None , push_to_hub=False , **kwargs , ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory , legacy_format , filename_prefix , push_to_hub , **kwargs )
| 518 |
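The `__getstate__`/`__setstate__` pair above exists because `PreTokenizer.custom(...)` wraps a Python object that cannot be pickled, so the tokenizer swaps in a plain `BertPreTokenizer` before serialization and rebuilds the Jieba pre-tokenizer on load. A minimal sketch of the same pattern for any unpicklable attribute; class and attribute names are illustrative:

import pickle

class HolderWithUnpicklable:
    def __init__(self):
        self.unpicklable = lambda s: s.split()  # stand-in for PreTokenizer.custom(...)

    def __getstate__(self):
        state = self.__dict__.copy()
        state["unpicklable"] = None  # replace with something picklable
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        self.unpicklable = lambda s: s.split()  # rebuild after unpickling

obj = pickle.loads(pickle.dumps(HolderWithUnpicklable()))
assert obj.unpicklable("a b") == ["a", "b"]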
"""simple docstring"""
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( __snake_case : List[Any] ) -> Optional[int]:
    return __snake_case + 2
class a__ ( unittest.TestCase ):
    def test_evaluate_assign( self ):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
        code = "x = y"
        state = {"y": 5}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 5, "y": 5} )
    def test_evaluate_call( self ):
        code = "y = add_two(x)"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
        # Won't work without the tool
        with CaptureStdout() as out:
            result = evaluate(code , {} , state=state )
        assert result is None
        assert "tried to execute add_two" in out.out
    def test_evaluate_constant( self ):
        code = "x = 3"
        state = {}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3} )
    def test_evaluate_dict( self ):
        code = "test_dict = {'x': x, 'y': add_two(x)}"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertDictEqual(result , {"x": 3, "y": 5} )
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_expression( self ):
        code = "x = 3\ny = 5"
        state = {}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "y": 5} )
    def test_evaluate_f_string( self ):
        code = "text = f'This is x: {x}.'"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == "This is x: 3."
        self.assertDictEqual(state , {"x": 3, "text": "This is x: 3."} )
    def test_evaluate_if( self ):
        code = "if x <= 3:\n y = 2\nelse:\n y = 5"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 2
        self.assertDictEqual(state , {"x": 3, "y": 2} )
        state = {"x": 8}
        result = evaluate(code , {} , state=state )
        # evaluate returns the value of the last assignment.
        assert result == 5
        self.assertDictEqual(state , {"x": 8, "y": 5} )
    def test_evaluate_list( self ):
        code = "test_list = [x, add_two(x)]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        self.assertListEqual(result , [3, 5] )
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
    def test_evaluate_name( self ):
        code = "y = x"
        state = {"x": 3}
        result = evaluate(code , {} , state=state )
        assert result == 3
        self.assertDictEqual(state , {"x": 3, "y": 3} )
    def test_evaluate_subscript( self ):
        code = "test_list = [x, add_two(x)]\ntest_list[1]"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_list": [3, 5]} )
        code = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
        state = {"x": 3}
        result = evaluate(code , {"add_two": add_two} , state=state )
        assert result == 5
        self.assertDictEqual(state , {"x": 3, "test_dict": {"x": 3, "y": 5}} )
    def test_evaluate_for( self ):
        code = "x = 0\nfor i in range(3):\n x = i"
        state = {}
        result = evaluate(code , {"range": range} , state=state )
        assert result == 2
        self.assertDictEqual(state , {"x": 2, "i": 2} )
| 518 | 1 |
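The tests above exercise `evaluate`, a restricted interpreter that walks the AST instead of calling `exec`, records assignments in a `state` dict, and returns the value of the last statement. A toy sketch of the core idea, supporting only literal assignments and bare expressions (far smaller than the real tool):

import ast

def tiny_evaluate(code: str, state: dict):
    result = None
    for node in ast.parse(code).body:
        if isinstance(node, ast.Assign):  # e.g. "x = 3"
            value = ast.literal_eval(node.value)
            state[node.targets[0].id] = value
            result = value
        elif isinstance(node, ast.Expr):  # bare expression statement
            result = ast.literal_eval(node.value)
    return result

state = {}
assert tiny_evaluate("x = 3", state) == 3 and state == {"x": 3}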
def ugly_numbers (__lowercase):
    ugly_nums = [1]
    ia , ib , ic = 0, 0, 0
    next_a = ugly_nums[ia] * 2
    next_b = ugly_nums[ib] * 3
    next_c = ugly_nums[ic] * 5
    for _ in range(1 , __lowercase):
        next_num = min(next_a , next_b , next_c)
        ugly_nums.append(next_num)
        if next_num == next_a:
            ia += 1
            next_a = ugly_nums[ia] * 2
        if next_num == next_b:
            ib += 1
            next_b = ugly_nums[ib] * 3
        if next_num == next_c:
            ic += 1
            next_c = ugly_nums[ic] * 5
    return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(2_0_0) = }')
| 23 |
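The three-pointer method above runs in O(n). The same sequence can also be generated with a heap in O(n log n), which some find easier to verify; a hedged alternative sketch:

import heapq

def ugly_numbers_heap(n: int) -> int:
    # repeatedly pop the smallest candidate and push its 2x/3x/5x multiples
    heap, seen = [1], {1}
    value = 1
    for _ in range(n):
        value = heapq.heappop(heap)
        for factor in (2, 3, 5):
            if value * factor not in seen:
                seen.add(value * factor)
                heapq.heappush(heap, value * factor)
    return value

assert ugly_numbers_heap(10) == 12  # 1, 2, 3, 4, 5, 6, 8, 9, 10, 12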
from __future__ import annotations
class IIRFilter :
    def __init__( self , order ) -> None:
        '''simple docstring'''
        self.order = order
        # a_{0} ... a_{k}
        self.a_coeffs = [1.0] + [0.0] * order
        # b_{0} ... b_{k}
        self.b_coeffs = [1.0] + [0.0] * order
        # x[n-1] ... x[n-k]
        self.input_history = [0.0] * self.order
        # y[n-1] ... y[n-k]
        self.output_history = [0.0] * self.order
    def set_coefficients( self , a_coeffs , b_coeffs ) -> None:
        '''simple docstring'''
        if len(a_coeffs ) < self.order:
            a_coeffs = [1.0, *a_coeffs]
        if len(a_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected a_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(a_coeffs )}'
            )
            raise ValueError(error_msg )
        if len(b_coeffs ) != self.order + 1:
            error_msg = (
                f'Expected b_coeffs to have {self.order + 1} elements '
                f'for {self.order}-order filter, got {len(b_coeffs )}'
            )
            raise ValueError(error_msg )
        self.a_coeffs = a_coeffs
        self.b_coeffs = b_coeffs
    def process( self , sample ) -> float:
        '''simple docstring'''
        result = 0.0
        # Start at index 1 and do index 0 at the end.
        for i in range(1 , self.order + 1 ):
            result += (
                self.b_coeffs[i] * self.input_history[i - 1]
                - self.a_coeffs[i] * self.output_history[i - 1]
            )
        result = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
        self.input_history[1:] = self.input_history[:-1]
        self.output_history[1:] = self.output_history[:-1]
        self.input_history[0] = sample
        self.output_history[0] = result
        return result
| 242 | 0 |
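The class above applies the direct-form-I difference equation y[n] = (b0*x[n] + sum(b_i*x[n-i]) - sum(a_i*y[n-i])) / a0. A short usage sketch with hand-picked first-order coefficients (illustrative values, not produced by a filter-design formula); it assumes the `IIRFilter` class from the snippet above:

# a simple two-tap smoother: y[n] = 0.5 * x[n] + 0.5 * x[n-1]
filt = IIRFilter(order=1)
filt.set_coefficients(a_coeffs=[1.0, 0.0], b_coeffs=[0.5, 0.5])

step_input = [0.0, 1.0, 1.0, 1.0]
response = [filt.process(x) for x in step_input]
print(response)  # [0.0, 0.5, 1.0, 1.0] -- the step is smoothed over one sample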
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
__lowerCamelCase = logging.get_logger(__name__)
class UpperCamelCase__( SequenceFeatureExtractor ):
    model_input_names : Optional[Any] = ['audio_values', 'audio_mask']
    def __init__( self ,spectrogram_length=20_48 ,num_channels=1 ,patch_size=[16, 16] ,feature_size=1_28 ,sampling_rate=4_41_00 ,hop_length_to_sampling_rate=86 ,n_fft=20_48 ,padding_value=0.0 ,**kwargs ,) -> Any:
        super().__init__(
            feature_size=feature_size ,sampling_rate=sampling_rate ,padding_value=padding_value ,**kwargs ,)
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=feature_size ,min_frequency=0.0 ,max_frequency=2_2_0_5_0.0 ,sampling_rate=sampling_rate ,norm='slaney' ,mel_scale='slaney' ,).T
    def _np_extract_fbank_features( self ,waveform ) -> np.ndarray:
        log_spec = spectrogram(
            waveform ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters.T ,log_mel='dB' ,db_range=8_0.0 ,)
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 2_0.0
        log_spec = np.clip(log_spec / 4_0.0 ,-2.0 ,0.0 ) + 1.0
        return log_spec
    def __call__( self ,raw_speech ,return_tensors = None ,return_attention_mask = True ,sampling_rate = None ,resample = False ,mask_audio = False ,**kwargs ,) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        is_batched_numpy = isinstance(raw_speech ,np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech] ,dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech ,np.ndarray ):
            raw_speech = np.asarray(raw_speech ,dtype=np.float32 )
        elif isinstance(raw_speech ,np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] ,list ):
            audio_features = [np.asarray(feature ,dtype=np.float32 ) for feature in audio_features]
        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )
        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature
        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data ,tensor_type=return_tensors )
        return encoded_inputs
 | 719 |
"""simple docstring"""
from __future__ import annotations
import pandas as pd
def calculate_waitingtime ( arrival_time , burst_time , no_of_processes ):
    """simple docstring"""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes ):
        remaining_time[i] = burst_time[i]
    complete = 0
    increment_time = 0
    minm = 999_999_999
    short = 0
    check = False
    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes ):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True
        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1
        minm = remaining_time[short]
        if minm == 0:
            minm = 999_999_999
        if remaining_time[short] == 0:
            complete += 1
            check = False
            # Find finish time of current process
            finish_time = increment_time + 1
            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]
            if waiting_time[short] < 0:
                waiting_time[short] = 0
        # Increment time
        increment_time += 1
    return waiting_time
def calculate_turnaroundtime ( burst_time , no_of_processes , waiting_time ):
    """simple docstring"""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes ):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
def calculate_average_times ( waiting_time , turn_around_time , no_of_processes ):
    """simple docstring"""
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes ):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(F'''Average waiting time = {total_waiting_time / no_of_processes:.5f}''' )
    print('Average turn around time =' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))
    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)
    calculate_average_times(waiting_time, turn_around_time, no_of_processes)
    fcfs = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"Process",
"BurstTime",
"ArrivalTime",
"WaitingTime",
"TurnAroundTime",
],
)
# Printing the dataFrame
pd.set_option("display.max_rows", fcfs.shape[0] + 1)
print(fcfs)
| 536 | 0 |
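The interactive block above reads processes from stdin; for quick verification the same functions can be driven directly. A small worked example with hand-chosen values; it assumes the functions from the snippet above:

arrival = [0, 1, 2]
burst = [5, 2, 1]
n = 3
wt = calculate_waitingtime(arrival, burst, n)
tat = calculate_turnaroundtime(burst, n, wt)
print(wt, tat)
# With shortest-remaining-time-first the long first process is preempted by
# the two shorter arrivals: expected waiting times [3, 0, 1],
# turnaround times [8, 2, 2]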
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs (context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    set_seed(3)
    # generate train_data and objective_set
    train_data , objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim)
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    # load pretrained model
    model = load_gpta('gpt2').to(device)
    print('computing perplexity on objective set')
    orig_perp = compute_perplexity(model , objective_set , context_len).item()
    print('perplexity on objective set:' , orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner (secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    set_seed(42)
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained('gpt2')
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune (model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device)
    model , lm_optimizer , lm_scheduler = recopy_model(model , device , max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len)
    test_perps.append(real_perp)
    print('Test perplexity, step' , global_step , ':' , real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device).unsqueeze(0))[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0)
                lm_optimizer.step()
                lm_scheduler.step() # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len)
                    test_perps.append(real_perp)
                    print('Test perplexity, step' , global_step , ':' , real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main ():
    parser = argparse.ArgumentParser(description='Fine-tune a transformer model with IGF on a language modeling task')
    # Required parameters
    parser.add_argument(
        '--data_dir' , default=None , type=str , required=True , help='The input data dir. Should contain data files for WikiText.' , )
    parser.add_argument(
        '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
    parser.add_argument(
        '--data_file' , type=str , default=None , help=(
            'A jbl file containing tokenized data which can be split as objective dataset, '
            'train_dataset and test_dataset.'
        ) , )
    parser.add_argument(
        '--igf_data_file' , type=str , default=None , help='A jbl file containing the context and information gain pairs to train secondary learner.' , )
    parser.add_argument(
        '--output_dir' , default=None , type=str , required=True , help='The output directory where the final fine-tuned model is stored.' , )
    parser.add_argument(
        '--tokenizer_name' , default=None , type=str , help='Pretrained tokenizer name or path if not the same as model_name' , )
    parser.add_argument('--seed' , type=int , default=None , help='A seed for reproducible training.')
    parser.add_argument(
        '--context_len' , default=32 , type=int , help=(
            'The maximum total input sequence length after tokenization. Sequences longer '
            'than this will be truncated, sequences shorter will be padded.'
        ) , )
    parser.add_argument(
        '--size_objective_set' , default=100 , type=int , help='number of articles that are long enough to be used as our objective set' , )
    parser.add_argument(
        '--eval_freq' , default=100 , type=int , help='secondary model evaluation is triggered at eval_freq')
    parser.add_argument('--max_steps' , default=1000 , type=int , help='To calculate training epochs')
    parser.add_argument(
        '--secondary_learner_batch_size' , default=128 , type=int , help='batch size of training data for secondary learner' , )
    parser.add_argument(
        '--batch_size' , default=16 , type=int , help='batch size of training data of language model(gpt2) ')
    parser.add_argument(
        '--eval_interval' , default=10 , type=int , help=(
            'decay the selectivity of our secondary learner filter from'
            '1 standard deviation above average to 1 below average after 10 batches'
        ) , )
    parser.add_argument(
        '--number' , default=100 , type=int , help='The number of examples split to be used as objective_set/test_data')
    parser.add_argument(
        '--min_len' , default=1026 , type=int , help='The minimum length of the article to be used as objective set')
    parser.add_argument(
        '--secondary_learner_max_epochs' , default=15 , type=int , help='number of epochs to train secondary learner')
    parser.add_argument('--trim' , default=True , type=bool , help='truncate the example if it exceeds context length')
    parser.add_argument(
        '--threshold' , default=1.0 , type=float , help=(
            'The threshold value used by secondary learner to filter the train_data and allow only'
            ' informative data as input to the model'
        ) , )
    parser.add_argument('--finetuned_model_name' , default='gpt2_finetuned.pt' , type=str , help='finetuned_model_name')
    parser.add_argument(
        '--recopy_model' , default=recopy_gpta , type=str , help='Reset the model to the original pretrained GPT-2 weights after each iteration' , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file='data/tokenized_stories_train_wikitext103.jbl' , igf_data_file='igf_context_pairs.jbl' , )
# Load train data for secondary learner
    secondary_learner_train_data = joblib.load('data/IGF_values.jbl')
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path='igf_model.pt' , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained('gpt2')
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset , test_dataset = generate_datasets(
        context_len=32 , file='data/tokenized_stories_train_wikitext103.jbl' , number=100 , min_len=1026 , trim=True)
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name='gpt2_finetuned.pt' , )
if __name__ == "__main__":
main()
| 23 |
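The fine-tuning loop above backprops only on contexts whose predicted information gain clears a threshold, and disables the filter after a short warm-up. A compact sketch of just that filtering decision; the decay-to-minus-one at step 10 mirrors the snippet, everything else is schematic:

def should_backprop(predicted_q: float, global_step: int, threshold: float):
    # after 10 batches the threshold drops to -1, effectively disabling the filter
    if global_step == 10:
        threshold = -1.0
    return predicted_q >= threshold, threshold

keep, threshold = should_backprop(predicted_q=0.3, global_step=0, threshold=1.0)
assert keep is False  # a low-information context is skipped early in training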
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class _lowercase ( ModelTesterMixin , UNetTesterMixin , unittest.TestCase ):
    model_class : List[str] = AutoencoderKL
    main_input_name : Union[str, Any] = "sample"
    base_precision : Union[str, Any] = 1e-2
@property
    def dummy_input( self : List[str] ) -> Optional[int]:
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes ).to(torch_device )
        return {"sample": image}
@property
    def input_shape( self : List[Any] ) -> List[Any]:
        return (3, 32, 32)
    @property
    def output_shape( self : int ) -> int:
        return (3, 32, 32)
    def prepare_init_args_and_inputs_for_common( self : Tuple ) -> Union[str, Any]:
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_forward_signature( self : Optional[Any] ) -> Any:
        pass
    def test_training( self : Tuple ) -> List[Any]:
        pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
    def test_gradient_checkpointing( self : List[str] ) -> int:
        # enable deterministic behavior for gradient checkpointing
        init_dict , inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict )
        model.to(torch_device )
        assert not model.is_gradient_checkpointing and model.training
        out = model(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()
        labels = torch.randn_like(out )
        loss = (out - labels).mean()
        loss.backward()
        # re-instantiate the model now enabling gradient checkpointing
        model_a = self.model_class(**init_dict )
        # clone model
        model_a.load_state_dict(model.state_dict() )
        model_a.to(torch_device )
        model_a.enable_gradient_checkpointing()
        assert model_a.is_gradient_checkpointing and model_a.training
        out_a = model_a(**inputs_dict ).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_a.zero_grad()
        loss_a = (out_a - labels).mean()
        loss_a.backward()
        # compare the output and parameters gradients
        self.assertTrue((loss - loss_a).abs() < 1e-5 )
        named_params = dict(model.named_parameters() )
        named_params_a = dict(model_a.named_parameters() )
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
    def test_from_pretrained_hub( self : int ) -> int:
        model , loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=True )
        self.assertIsNotNone(model )
        self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
        model.to(torch_device )
        image = model(**self.dummy_input )
        assert image is not None, "Make sure output is not None"
    def test_output_pretrained( self : Optional[int] ) -> List[str]:
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
        model = model.to(torch_device )
        model.eval()
        if torch_device == "mps":
            generator = torch.manual_seed(0 )
        else:
            generator = torch.Generator(device=torch_device ).manual_seed(0 )
        image = torch.randn(
            1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
        image = image.to(torch_device )
        with torch.no_grad():
            output = model(image , sample_posterior=True , generator=generator ).sample
        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == "mps":
            expected_output_slice = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
] )
elif torch_device == "cpu":
            expected_output_slice = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
            expected_output_slice = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
        self.assertTrue(torch_all_close(output_slice , expected_output_slice , rtol=1e-2 ) )
@slow
class _lowercase ( unittest.TestCase ):
    def get_file_format( self : Any , seed : List[Any] , shape : Any ) -> Union[str, Any]:
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s ) for s in shape] )}.npy'
    def tearDown( self : Optional[Any] ) -> Optional[int]:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def get_sd_image( self : int , seed : Optional[Any]=0 , shape : int=(4, 3, 512, 512) , fpaa : str=False ) -> int:
        dtype = torch.float16 if fpaa else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed , shape ) ) ).to(torch_device ).to(dtype )
        return image
    def get_sd_vae_model( self : Optional[Any] , model_id : Tuple="CompVis/stable-diffusion-v1-4" , fpaa : Union[str, Any]=False ) -> List[str]:
        revision = 'fp16' if fpaa else None
        torch_dtype = torch.float16 if fpaa else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id , subfolder='vae' , torch_dtype=torch_dtype , revision=revision , )
        model.to(torch_device ).eval()
        return model
    def get_generator( self : List[str] , seed : Tuple=0 ) -> Union[str, Any]:
        if torch_device == "mps":
            return torch.manual_seed(seed )
        return torch.Generator(device=torch_device ).manual_seed(seed )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion( self : Dict , seed : Dict , expected_slice : Dict , expected_slice_mps : Optional[int] ) -> Optional[Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16( self : Optional[int] , seed : Union[str, Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model = self.get_sd_vae_model(fpaa=True )
        image = self.get_sd_image(seed , fpaa=True )
        generator = self.get_generator(seed )
        with torch.no_grad():
            sample = model(image , generator=generator , sample_posterior=True ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
    def test_stable_diffusion_mode( self : str , seed : int , expected_slice : Union[str, Any] , expected_slice_mps : Dict ) -> List[Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        with torch.no_grad():
            sample = model(image ).sample
        assert sample.shape == image.shape
        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode( self : Optional[Any] , seed : int , expected_slice : Union[str, Any] ) -> int:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16( self : int , seed : List[Any] , expected_slice : List[Any] ) -> str:
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        assert torch_all_close(output_slice , expected_output_slice , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16( self : Any , seed : int ) -> Tuple:
        model = self.get_sd_vae_model(fpaa=True )
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) , fpaa=True )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0( self : List[Any] , seed : int ) -> str:
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 64, 64) )
        with torch.no_grad():
            sample = model.decode(encoding ).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding ).sample
        assert list(sample.shape ) == [3, 3, 512, 512]
        assert torch_all_close(sample , sample_a , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample( self : int , seed : Optional[Any] , expected_slice : Optional[Any] ) -> Union[str, Any]:
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed )
        generator = self.get_generator(seed )
        with torch.no_grad():
            dist = model.encode(image ).latent_dist
            sample = dist.sample(generator=generator )
        assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice )
        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance )
| 56 | 0 |
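The slow tests above all follow one regression-test idiom: seed a generator, run the model, flatten a small fixed slice of the output, and compare it to stored reference values within a tolerance. A minimal sketch of that pattern:

import torch

def check_output_slice(output: torch.Tensor, expected: list, atol: float = 3e-3) -> bool:
    # compare a small deterministic slice instead of the full tensor
    output_slice = output[-1, -2:, -2:, :2].flatten().float().cpu()
    return torch.allclose(output_slice, torch.tensor(expected), atol=atol)

sample = torch.zeros(1, 4, 4, 4)
assert check_output_slice(sample, [0.0] * 8)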
'''simple docstring'''
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class lowerCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class : Tuple = WavaVecaPhonemeCTCTokenizer
    test_rust_tokenizer : int = False
def lowercase ( self ):
super().setUp()
        vocab = (
'<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(" ")
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.special_tokens_map = {"pad_token": "<pad>", "unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        toks = [(i, tokenizer.decode([i], clean_up_tokenization_spaces=False)) for i in range(len(tokenizer))]
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], do_phonemize=False), toks))

        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks

        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt

        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return Wav2Vec2PhonemeCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def test_tokenizer_add_new_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        # check adding a single token
        tokenizer.add_tokens("xxx")
        token_ids = tokenizer("m xxx ɪ", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 392, 17])  # xxx should be last token

        tokenizer.add_tokens(["aaa", "bbb", "ccc"])
        token_ids = tokenizer("m aaa ɪ ccc", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [13, 393, 17, 395])  # aaa and ccc should be after xxx and 2 after aaa

        token_ids = tokenizer("maɪ c", do_phonemize=False).input_ids
        self.assertEqual(token_ids, [3, 200])  # mai should be <unk> (=3)

    def test_phonemize(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ h aʊ ɑːɹ j uː")

    def test_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_encode_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_decode(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")

        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98],
            [24, 22, 5, 24, 22, 5, 77],
        ]
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])
    def test_phonemize_with_word_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(phonemes, "h ə l oʊ | h aʊ | ɑːɹ | j uː |")

    def test_encode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        self.assertEqual(tokenizer(input_text).input_ids, tokenizer(phonemes, do_phonemize=False).input_ids)

    def test_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 15, 8, tokenizer.word_delimiter_token_id, 98],
            [tokenizer.word_delimiter_token_id, 24, 22, tokenizer.word_delimiter_token_id, 5, 24, 22, 5, 77],
        ]
        # fmt: on

        # decode with word_del_token filter
        tokens = tokenizer.decode(sample_ids[0])
        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ", "j ð s j ð s oːɹ"])

        # decode with no word_del_token filter
        tokens = tokenizer.decode(sample_ids[0], filter_word_delimiter_token=False)
        batch_tokens = tokenizer.batch_decode(sample_ids, filter_word_delimiter_token=False)
        self.assertEqual(tokens, batch_tokens[0])
        self.assertEqual(batch_tokens, ["k s ɾ | ɾ l | ɭʲ", "| j ð | s j ð s oːɹ"])

    def test_encode_decode_with_del(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=False)
        self.assertEqual(phonemes, phonemes_enc_dec)

    def test_encode_decode_with_del_filter(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token="|"
        )
        tokenizer.add_tokens("|")

        input_text = "Hello how are you"
        phonemes = tokenizer.phonemize(input_text, phonemizer_lang="en-us")
        phonemes_enc_dec = tokenizer.decode(tokenizer(input_text).input_ids, filter_word_delimiter_token=True)
        self.assertEqual(" ".join([p.strip() for p in phonemes.split(" |")]).strip(), phonemes_enc_dec)
    def test_change_phonemizer_lang(self):
        tokenizer = self.tokenizer_class.from_pretrained(
            "facebook/wav2vec2-lv-60-espeak-cv-ft", word_delimiter_token=None
        )
        input_text = "Hello how are you"

        input_ids_en = tokenizer(input_text, phonemizer_lang="en-us").input_ids
        input_ids_fr = tokenizer(input_text, phonemizer_lang="fr-fr").input_ids
        self.assertNotEqual(input_ids_en, input_ids_fr)

        text_en = tokenizer.decode(input_ids_en)
        text_fr = tokenizer.decode(input_ids_fr)
        self.assertEqual(text_en, "h ə l oʊ h aʊ ɑːɹ j uː")
        self.assertEqual(text_fr, "ɛ l o h aʊ a ʁ j u")

    def test_case_insensitive(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        input_text_up = "Hello how Are you"
        input_text_low = "hello how are you"
        input_ids_up = tokenizer(input_text_up).input_ids
        input_ids_low = tokenizer(input_text_low).input_ids
        self.assertEqual(input_ids_up, input_ids_low)

    def test_tokenizer_decode_added_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
        tokenizer.add_tokens(["!", "?"])
        tokenizer.add_special_tokens({"cls_token": "$$$"})

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 8, 98, 392, 392, 393, 392, 392, 393, 394, 394],
            [24, 22, 5, 24, 22, 5, 77, tokenizer.pad_token_id, 394, 394],
        ]
        # fmt: on

        batch_tokens = tokenizer.batch_decode(sample_ids)
        self.assertEqual(batch_tokens, ["k s ɾ ɾ l ɭʲ!?!? $$$", "j ð s j ð s oːɹ $$$"])
    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list

    def test_offsets(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")
        tokenizer.add_tokens("|")

        # fmt: off
        # ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
        sample_ids = [11, 5, 5, 5, 15, 15, tokenizer.pad_token_id, 15, 15, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 15, 8, 8, 8, tokenizer.word_delimiter_token_id, 98]
        # fmt: on

        outputs = tokenizer.decode(sample_ids, output_char_offsets=True, filter_word_delimiter_token=False)
        # check Wav2Vec2CTCTokenizerOutput keys for char
        self.assertEqual(len(outputs.keys()), 2)
        self.assertTrue("text" in outputs)
        self.assertTrue("char_offsets" in outputs)
        self.assertTrue(isinstance(outputs, Wav2Vec2PhonemeCTCTokenizerOutput))

        # check that order of chars is correct and identical for both outputs
        self.assertEqual(" ".join(self.get_from_offsets(outputs["char_offsets"], "char")), outputs.text)
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "char"), ["k", "s", "ɾ", "ɾ", "|", "ɾ", "l", "|", "ɭʲ"]
        )

        # check that offsets are actually correct for char
        # 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
        # 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "start_offset"), [0, 1, 4, 7, 9, 11, 12, 15, 16]
        )
        self.assertListEqual(
            self.get_from_offsets(outputs["char_offsets"], "end_offset"), [1, 4, 6, 9, 10, 12, 15, 16, 17]
        )
    def test_offsets_batch(self):
        tokenizer = self.get_tokenizer(word_delimiter_token="|")

        def check_list_tuples_equal(outputs_batch, outputs_list):
            self.assertTrue(isinstance(outputs_batch, Wav2Vec2PhonemeCTCTokenizerOutput))
            self.assertTrue(isinstance(outputs_list[0], Wav2Vec2PhonemeCTCTokenizerOutput))

            # transform list to ModelOutput
            outputs_batch_2 = Wav2Vec2PhonemeCTCTokenizerOutput(
                {k: [d[k] for d in outputs_list] for k in outputs_list[0]}
            )
            self.assertListEqual(outputs_batch["text"], outputs_batch_2["text"])

            def recursive_check(list_or_dict_1, list_or_dict_2):
                if isinstance(list_or_dict_1, list):
                    [recursive_check(l1, l2) for l1, l2 in zip(list_or_dict_1, list_or_dict_2)]
                self.assertEqual(list_or_dict_1, list_or_dict_2)

            if "char_offsets" in outputs_batch:
                recursive_check(outputs_batch["char_offsets"], outputs_batch_2["char_offsets"])

        # fmt: off
        sample_ids = [
            [11, 5, 15, tokenizer.pad_token_id, 15, 4, 8, 98, 32, 32, 32, 32, 4, 33, tokenizer.word_delimiter_token_id, 32, 32, 33, 34, 34],
            [24, 22, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 24, 22, 22, 22, 4, 5, 77, tokenizer.pad_token_id, 22, 22, 4, 34, 34, 34, 34],
        ]
        # fmt: on

        # We assume that `decode` works as expected. All we will check now is
        # the output type is correct and the output is identical to `decode`

        # char
        outputs_char_batch = tokenizer.batch_decode(sample_ids, output_char_offsets=True)
        outputs_char = [tokenizer.decode(ids, output_char_offsets=True) for ids in sample_ids]
        check_list_tuples_equal(outputs_char_batch, outputs_char)
    @unittest.skip("Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes")
    def test_added_tokens_do_lower_case(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeTokenizer always puts spaces between phonemes")
    def test_encode_decode_with_spaces(self):
        pass

    @unittest.skip("encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency")
    def test_internal_consistency(self):
        pass

    @unittest.skip("Wav2Vec2PhonemeModel has no max model length => no testing")
    def test_pretrained_model_lists(self):
        pass
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)
    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_tf_encode_plus_sent_to_model(self):
        pass

    @unittest.skip("The tokenizer shouldn't be used to encode input IDs (except for labels), only to decode.")
    def test_torch_encode_plus_sent_to_model(self):
        pass

    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
        # is not the case for Wav2Vec2PhonemeCTCTokenizer.
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                tokens = ["ð", "ɪ", "s", "ɪ", "z", "ɐ", "t", "ɛ", "k", "s", "t"]
                output = tokenizer.convert_tokens_to_string(tokens)

                self.assertIsInstance(output["text"], str)
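A hedged usage sketch of the tokenizer exercised above (it assumes the `phonemizer` backend with espeak is installed and the `facebook/wav2vec2-lv-60-espeak-cv-ft` checkpoint is reachable; the expected string mirrors the test assertions):

from transformers import Wav2Vec2PhonemeCTCTokenizer

tokenizer = Wav2Vec2PhonemeCTCTokenizer.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft")
# text -> phonemes -> ids, then a CTC-style decode back to the phoneme string
phonemes = tokenizer.phonemize("Hello how are you", phonemizer_lang="en-us")  # 'h ə l oʊ h aʊ ɑːɹ j uː'
input_ids = tokenizer(phonemes, do_phonemize=False).input_ids
print(tokenizer.decode(input_ids))  # round-trips back to the phoneme string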
| 646 |
'''simple docstring'''
def is_even(number: int) -> bool:
    """
    Return True if `number` is even, False otherwise.

    >>> is_even(0)
    True
    >>> is_even(7)
    False
    """
    return number & 1 == 0
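# A short illustration of the bit trick (example values, not from the original
# module): the least-significant bit of a binary integer is 0 exactly for even
# numbers, so masking with `& 1` decides parity without division or modulo.
#   6 == 0b110 -> 6 & 1 == 0 -> even
#   7 == 0b111 -> 7 & 1 == 1 -> odd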
if __name__ == "__main__":
import doctest
doctest.testmod()
| 646 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_pytesseract_available():
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
SCREAMING_SNAKE_CASE : Tuple = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
SCREAMING_SNAKE_CASE : int = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , UpperCAmelCase_ )
self.assertListEqual(encoding.boxes , UpperCAmelCase_ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
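A hedged usage sketch of the image processor exercised above (assumes `pytesseract` and the Tesseract binary are installed for the OCR path; the input file name is illustrative):

from PIL import Image
from transformers import LayoutLMv3ImageProcessor

# apply_ocr=True (the default) resizes to 224x224 and also returns OCR'd words
# with normalized bounding boxes
processor = LayoutLMv3ImageProcessor()
image = Image.open("document.png").convert("RGB")  # hypothetical input file
encoding = processor(image, return_tensors="pt")   # pixel_values: (1, 3, 224, 224)
words, boxes = encoding.words, encoding.boxes

# apply_ocr=False returns pixel values only, for pipelines that supply their own
# words and boxes
processor_no_ocr = LayoutLMv3ImageProcessor(apply_ocr=False)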
| 62 |
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint-repo',
default=None,
type=str,
required=True,
help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase__ : Union[str, Any] = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
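A hedged invocation sketch for the script above (the script file name and output directory are illustrative; the repo id comes from the script's own --checkpoint-repo help text):

# python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#     --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#     --pytorch_dump_folder_path ./roberta_prelayernorm_converted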
| 223 | 0 |
def solution(n: int = 1000) -> int:
    """
    Return the index of the first Fibonacci number to contain `n` digits.
    """
    f1, f2 = 1, 1
    index = 2
    while True:
        i = 0
        f = f1 + f2
        f1, f2 = f2, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index
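# Quick sanity checks (values worked out by hand, not part of the original file):
# the sequence runs 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, ..., so the first
# 2-digit term is F(7) = 13 and the first 3-digit term is F(12) = 144.
# assert solution(2) == 7
# assert solution(3) == 12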
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 130 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er",
            "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
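A minimal sketch of the global-attention padding behaviour tested above (assumes the allenai/led-base-16384 checkpoint is reachable; per the test, `tokenizer.pad` extends a user-supplied `global_attention_mask` with -1 alongside the padded input ids):

from transformers import LEDTokenizerFast

tokenizer = LEDTokenizerFast.from_pretrained("allenai/led-base-16384")
encoded = tokenizer(["Summary of the text.", "Another summary."], padding=False)
# mark every real token with 0; `pad` fills the padded positions with -1
encoded["global_attention_mask"] = [[0] * len(ids) for ids in encoded["input_ids"]]
padded = tokenizer.pad(encoded)
print(padded["global_attention_mask"])  # e.g. [[0]*7, [0]*5 + [-1, -1]]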
| 130 | 1 |